- <?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
- <!-- Generated by the JDiff Javadoc doclet -->
- <!-- (http://www.jdiff.org) -->
- <!-- on Sun May 31 20:46:08 PDT 2009 -->
- <api
- xmlns:xsi='http://www.w3.org/2001/XMLSchema-instance'
- xsi:noNamespaceSchemaLocation='api.xsd'
- name="hadoop-hdfs 0.20.0"
- jdversion="1.0.9">
- <!-- Command line arguments = -doclet jdiff.JDiff -docletpath /home/gkesavan/release-0.20.0/build/ivy/lib/Hadoop/jdiff/jdiff-1.0.9.jar:/home/gkesavan/release-0.20.0/build/ivy/lib/Hadoop/jdiff/xerces-1.4.4.jar -classpath /home/gkesavan/release-0.20.0/build/classes:/home/gkesavan/release-0.20.0/lib/commons-cli-2.0-SNAPSHOT.jar:/home/gkesavan/release-0.20.0/lib/hsqldb-1.8.0.10.jar:/home/gkesavan/release-0.20.0/lib/jsp-2.1/jsp-2.1.jar:/home/gkesavan/release-0.20.0/lib/jsp-2.1/jsp-api-2.1.jar:/home/gkesavan/release-0.20.0/lib/kfs-0.2.2.jar:/home/gkesavan/release-0.20.0/conf:/home/gkesavan/.ivy2/cache/commons-logging/commons-logging/jars/commons-logging-1.0.4.jar:/home/gkesavan/.ivy2/cache/log4j/log4j/jars/log4j-1.2.15.jar:/home/gkesavan/.ivy2/cache/commons-httpclient/commons-httpclient/jars/commons-httpclient-3.0.1.jar:/home/gkesavan/.ivy2/cache/commons-codec/commons-codec/jars/commons-codec-1.3.jar:/home/gkesavan/.ivy2/cache/xmlenc/xmlenc/jars/xmlenc-0.52.jar:/home/gkesavan/.ivy2/cache/net.java.dev.jets3t/jets3t/jars/jets3t-0.6.1.jar:/home/gkesavan/.ivy2/cache/commons-net/commons-net/jars/commons-net-1.4.1.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/servlet-api-2.5/jars/servlet-api-2.5-6.1.14.jar:/home/gkesavan/.ivy2/cache/oro/oro/jars/oro-2.0.8.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/jetty/jars/jetty-6.1.14.jar:/home/gkesavan/.ivy2/cache/org.mortbay.jetty/jetty-util/jars/jetty-util-6.1.14.jar:/home/gkesavan/.ivy2/cache/tomcat/jasper-runtime/jars/jasper-runtime-5.5.12.jar:/home/gkesavan/.ivy2/cache/tomcat/jasper-compiler/jars/jasper-compiler-5.5.12.jar:/home/gkesavan/.ivy2/cache/commons-el/commons-el/jars/commons-el-1.0.jar:/home/gkesavan/.ivy2/cache/junit/junit/jars/junit-3.8.1.jar:/home/gkesavan/.ivy2/cache/commons-logging/commons-logging-api/jars/commons-logging-api-1.0.4.jar:/home/gkesavan/.ivy2/cache/org.slf4j/slf4j-api/jars/slf4j-api-1.4.3.jar:/home/gkesavan/.ivy2/cache/org.eclipse.jdt/core/jars/core-3.1.1.jar:/home/gkesavan/.ivy2/cache/org.slf4j/slf4j-log4j12/jars/slf4j-log4j12-1.4.3.jar:/home/gkesavan/.ivy2/cache/jdiff/jdiff/jars/jdiff-1.0.9.jar:/home/gkesavan/.ivy2/cache/xerces/xerces/jars/xerces-1.4.4.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-launcher.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-resolver.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-starteam.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-netrexx.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-testutil.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jai.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-swing.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jmf.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-bcel.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jdepend.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-jsch.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-bsf.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-antlr.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-weblogic.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-junit.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-log4j.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/xercesImpl.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-oro.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-trax.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-nodeps.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-commons-logging.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-apache-regexp.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant
-stylebook.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-javamail.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/ant-commons-net.jar:/home/gkesavan/tools/apache-ant-1.7.1/lib/xml-apis.jar:/home/gkesavan/tools/jdk1.6.0_07-32bit/lib/tools.jar -sourcepath /home/gkesavan/release-0.20.0/src/hdfs -apidir /home/gkesavan/release-0.20.0/lib/jdiff -apiname hadoop 0.20.1-dev -->
- <package name="org.apache.hadoop.hdfs">
- <!-- start class org.apache.hadoop.hdfs.ChecksumDistributedFileSystem -->
- <class name="ChecksumDistributedFileSystem" extends="org.apache.hadoop.fs.ChecksumFileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ChecksumDistributedFileSystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="ChecksumDistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="deprecated, no comment">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@deprecated]]>
- </doc>
- </constructor>
- <method name="getRawCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total raw capacity of the filesystem, disregarding
- replication.]]>
- </doc>
- </method>
- <method name="getRawUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total raw used space in the filesystem, disregarding
- replication.]]>
- </doc>
- </method>
- <method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return statistics for each datanode.]]>
- </doc>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Enter, leave or get safe mode.
-
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
- </doc>
- </method>
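- <!-- Illustrative only: a minimal sketch of querying safe mode via setSafeMode(), assuming
-      fs.default.name points at an HDFS cluster so FileSystem.get() returns a DistributedFileSystem;
-      DistributedFileSystem and the SAFEMODE_* constants are assumed from the 0.20 API and are not
-      part of the listing above.
-      import java.io.IOException;
-      import org.apache.hadoop.conf.Configuration;
-      import org.apache.hadoop.fs.FileSystem;
-      import org.apache.hadoop.hdfs.DistributedFileSystem;
-      import org.apache.hadoop.hdfs.protocol.FSConstants;
-      public class SafeModeSketch {
-        public static void main(String[] args) throws IOException {
-          Configuration conf = new Configuration();
-          DistributedFileSystem dfs = (DistributedFileSystem) FileSystem.get(conf);
-          // SAFEMODE_GET only queries the current state; SAFEMODE_ENTER / SAFEMODE_LEAVE change it.
-          boolean inSafeMode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
-          System.out.println("Namenode in safe mode: " + inSafeMode);
-          dfs.close();
-        }
-      }
- -->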
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Finalize previously upgraded file system state.]]>
- </doc>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pathname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="reportChecksumFailure" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
- <param name="inPos" type="long"/>
- <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
- <param name="sumsPos" type="long"/>
- <doc>
- <![CDATA[We need to find the blocks that didn't match. Likely only one
- is corrupt but we will report both to the namenode. In the future,
- we can consider figuring out exactly which block is corrupt.]]>
- </doc>
- </method>
- <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the stat information about the file.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[An implementation of ChecksumFileSystem over DistributedFileSystem.
- Note that as of now (May 07), DistributedFileSystem natively checksums
- all of its data. Using this class is not necessary in most cases.
- Currently provided mainly for backward compatibility and testing.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.ChecksumDistributedFileSystem -->
- <!-- start class org.apache.hadoop.hdfs.DFSClient -->
- <class name="DFSClient" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="java.io.Closeable"/>
- <constructor name="DFSClient" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new DFSClient connected to the default namenode.]]>
- </doc>
- </constructor>
- <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration, org.apache.hadoop.fs.FileSystem.Statistics"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new DFSClient connected to the given namenode server.]]>
- </doc>
- </constructor>
- <constructor name="DFSClient" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </constructor>
- <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createNamenode" return="org.apache.hadoop.hdfs.protocol.ClientProtocol"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nameNodeAddr" type="java.net.InetSocketAddress"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Close the file system, abandoning all of the leases and files being
- created, and close connections to the namenode.]]>
- </doc>
- </method>
- <method name="getDefaultBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the default block size for this cluster
- @return the default block size in bytes]]>
- </doc>
- </method>
- <method name="getBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Report corrupt blocks that were discovered by the client.]]>
- </doc>
- </method>
- <method name="getDefaultReplication" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getHints" return="java.lang.String[][]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use getBlockLocations instead
- Get hints about the location of the indicated block(s).
-
- getHints() returns a list of hostnames that store data for
- a specific file region. It returns a set of hostnames for
- every block within the indicated region.
- This function is very useful when writing code that considers
- data-placement when performing operations. For example, the
- MapReduce system tries to schedule tasks on the same machines
- as the data-block the task processes.">
- <param name="src" type="java.lang.String"/>
- <param name="start" type="long"/>
- <param name="length" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@deprecated Use getBlockLocations instead
- Get hints about the location of the indicated block(s).
-
- getHints() returns a list of hostnames that store data for
- a specific file region. It returns a set of hostnames for
- every block within the indicated region.
- This function is very useful when writing code that considers
- data-placement when performing operations. For example, the
- MapReduce system tries to schedule tasks on the same machines
- as the data-block the task processes.]]>
- </doc>
- </method>
- <method name="getBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="start" type="long"/>
- <param name="length" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get block location info about a file.
-
- getBlockLocations() returns a list of hostnames that store
- data for a specific file region. It returns a set of hostnames
- for every block within the indicated region.
- This function is very useful when writing code that considers
- data-placement when performing operations. For example, the
- MapReduce system tries to schedule tasks on the same machines
- as the data-block the task processes.]]>
- </doc>
- </method>
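- <!-- Illustrative only: a minimal sketch of data-placement-aware code using getBlockLocations(),
-      assuming a reachable namenode picked up from the default Configuration; the path
-      /user/example/data.txt is hypothetical, and the BlockLocation accessors (getOffset, getHosts)
-      are assumed from the 0.20 org.apache.hadoop.fs API rather than shown in this listing.
-      import java.io.IOException;
-      import org.apache.hadoop.conf.Configuration;
-      import org.apache.hadoop.fs.BlockLocation;
-      import org.apache.hadoop.hdfs.DFSClient;
-      public class BlockLocationsSketch {
-        public static void main(String[] args) throws IOException {
-          DFSClient client = new DFSClient(new Configuration());  // connects to the default namenode
-          try {
-            // Hosts storing each block in the first 128 MB of the file.
-            BlockLocation[] locs = client.getBlockLocations("/user/example/data.txt", 0L, 128L * 1024 * 1024);
-            for (BlockLocation loc : locs) {
-              for (String host : loc.getHosts()) {
-                System.out.println(loc.getOffset() + " stored on " + host);
-              }
-            }
-          } finally {
-            client.close();  // abandons leases and closes the namenode connection
-          }
-        }
-      }
- -->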
- <method name="open" return="org.apache.hadoop.hdfs.DFSClient.DFSInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new dfs file and return an output stream for writing into it.
-
- @param src stream name
- @param overwrite do not check for file existence if true
- @return output stream
- @throws IOException]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new dfs file and return an output stream for writing into it
- with write-progress reporting.
-
- @param src stream name
- @param overwrite do not check for file existence if true
- @return output stream
- @throws IOException]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new dfs file with the specified block replication
- and return an output stream for writing into the file.
-
- @param src stream name
- @param overwrite do not check for file existence if true
- @param replication block replication
- @return output stream
- @throws IOException]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new dfs file with the specified block replication
- with write-progress reporting and return an output stream for writing
- into the file.
-
- @param src stream name
- @param overwrite do not check for file existence if true
- @param replication block replication
- @return output stream
- @throws IOException]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Call
- {@link #create(String,FsPermission,boolean,short,long,Progressable,int)}
- with default permission.
- @see FsPermission#getDefault()]]>
- </doc>
- </method>
- <method name="create" return="java.io.OutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new dfs file with the specified block replication
- with write-progress reporting and return an output stream for writing
- into the file.
-
- @param src stream name
- @param permission The permission of the file being created.
- If permission == null, use {@link FsPermission#getDefault()}.
- @param overwrite do not check for file existence if true
- @param replication block replication
- @return output stream
- @throws IOException
- @see ClientProtocol#create(String, FsPermission, String, boolean, short, long)]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set replication for an existing file.
-
- @see ClientProtocol#setReplication(String, short)
- @param replication
- @throws IOException
- @return true if successful or false if the file does not exist]]>
- </doc>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rename file or directory.
- See {@link ClientProtocol#rename(String, String)}.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Delete file or directory.
- See {@link ClientProtocol#delete(String)}.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Delete file or directory.
- Deletes the contents of the directory if it is non-empty and recursive
- is set to true.]]>
- </doc>
- </method>
- <method name="exists" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Implemented using getFileInfo(src)]]>
- </doc>
- </method>
- <method name="isDirectory" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="Use getFileStatus() instead">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@deprecated Use getFileStatus() instead]]>
- </doc>
- </method>
- <method name="listPaths" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
- <param name="socketFactory" type="javax.net.SocketFactory"/>
- <param name="socketTimeout" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the checksum of a file.
- @param src The file path
- @return The checksum]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set permissions to a file or directory.
- @param src path name.
- @param permission
- @throws <code>FileNotFoundException</code> if the file does not exist.]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set file or directory owner.
- @param src path name.
- @param username user id.
- @param groupname user group.
- @throws <code>FileNotFoundException</code> if the file does not exist.]]>
- </doc>
- </method>
- <method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="totalRawCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="totalRawUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getMissingBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with no good replicas left. Normally should be
- zero.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getUnderReplicatedBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with one or more replicas missing.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCorruptBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with at least one replica marked corrupt.
- @throws IOException]]>
- </doc>
- </method>
- <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Enter, leave or get safe mode.
- See {@link ClientProtocol#setSafeMode(FSConstants.SafeModeAction)}
- for more details.
-
- @see ClientProtocol#setSafeMode(FSConstants.SafeModeAction)]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the hosts and exclude files. (Rereads them.)
- See {@link ClientProtocol#refreshNodes()}
- for more details.
-
- @see ClientProtocol#refreshNodes()]]>
- </doc>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pathname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps DFS data structures into specified file.
- See {@link ClientProtocol#metaSave(String)}
- for more details.
-
- @see ClientProtocol#metaSave(String)]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#finalizeUpgrade()]]>
- </doc>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@see ClientProtocol#distributedUpgradeProgress(FSConstants.UpgradeAction)]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a directory (or hierarchy of directories) with the given
- name and permission.
- @param src The path of the directory being created
- @param permission The permission of the directory being created.
- If permission == null, use {@link FsPermission#getDefault()}.
- @return True if the operation success.
- @see ClientProtocol#mkdirs(String, FsPermission)]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set the modification and access time of a file.
- @throws FileNotFoundException if the path is not a file]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="MAX_BLOCK_ACQUIRE_FAILURES" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="namenode" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DFSClient can connect to a Hadoop Filesystem and
- perform basic file tasks. It uses the ClientProtocol
- to communicate with a NameNode daemon, and connects
- directly to DataNodes to read/write block data.
- Hadoop DFS users should obtain an instance of
- DistributedFileSystem, which uses DFSClient to handle
- filesystem tasks.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSClient -->
- <!-- start class org.apache.hadoop.hdfs.DFSClient.BlockReader -->
- <class name="DFSClient.BlockReader" extends="org.apache.hadoop.fs.FSInputChecker"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="read" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="buf" type="byte[]"/>
- <param name="off" type="int"/>
- <param name="len" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="skip" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="n" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="read" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="seekToNewSource" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="targetPos" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="seek"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pos" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getChunkPosition" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="pos" type="long"/>
- </method>
- <method name="readChunk" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="pos" type="long"/>
- <param name="buf" type="byte[]"/>
- <param name="offset" type="int"/>
- <param name="len" type="int"/>
- <param name="checksumBuf" type="byte[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sock" type="java.net.Socket"/>
- <param name="file" type="java.lang.String"/>
- <param name="blockId" type="long"/>
- <param name="genStamp" type="long"/>
- <param name="startOffset" type="long"/>
- <param name="len" type="long"/>
- <param name="bufferSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sock" type="java.net.Socket"/>
- <param name="file" type="java.lang.String"/>
- <param name="blockId" type="long"/>
- <param name="genStamp" type="long"/>
- <param name="startOffset" type="long"/>
- <param name="len" type="long"/>
- <param name="bufferSize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Java Doc required]]>
- </doc>
- </method>
- <method name="newBlockReader" return="org.apache.hadoop.hdfs.DFSClient.BlockReader"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sock" type="java.net.Socket"/>
- <param name="file" type="java.lang.String"/>
- <param name="blockId" type="long"/>
- <param name="genStamp" type="long"/>
- <param name="startOffset" type="long"/>
- <param name="len" type="long"/>
- <param name="bufferSize" type="int"/>
- <param name="verifyChecksum" type="boolean"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readAll" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="buf" type="byte[]"/>
- <param name="offset" type="int"/>
- <param name="len" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Kind of like readFully(), but only reads as much as possible,
- and allows use of the protected readFully().]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This is a wrapper around a connection to a datanode
- and understands checksum, offset etc.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSClient.BlockReader -->
- <!-- start class org.apache.hadoop.hdfs.DFSUtil -->
- <class name="DFSUtil" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSUtil"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="isValidName" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <doc>
- <![CDATA[Whether the pathname is valid. Currently prohibits relative paths,
- and names which contain a ":" or "/"]]>
- </doc>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DFSUtil -->
- <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem -->
- <class name="DistributedFileSystem" extends="org.apache.hadoop.fs.FileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DistributedFileSystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DistributedFileSystem" type="java.net.InetSocketAddress, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="deprecated, no comment">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@deprecated]]>
- </doc>
- </constructor>
- <method name="getName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="deprecated, no comment">
- <doc>
- <![CDATA[@deprecated]]>
- </doc>
- </method>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="uri" type="java.net.URI"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="checkPath"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="path" type="org.apache.hadoop.fs.Path"/>
- <doc>
- <![CDATA[Permit paths which explicitly specify the default port.]]>
- </doc>
- </method>
- <method name="makeQualified" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="org.apache.hadoop.fs.Path"/>
- <doc>
- <![CDATA[Normalize paths that explicitly specify the default port.]]>
- </doc>
- </method>
- <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDefaultBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDefaultReplication" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setWorkingDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dir" type="org.apache.hadoop.fs.Path"/>
- </method>
- <method name="getHomeDirectory" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getFileBlockLocations" return="org.apache.hadoop.fs.BlockLocation[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="file" type="org.apache.hadoop.fs.FileStatus"/>
- <param name="start" type="long"/>
- <param name="len" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setVerifyChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="verifyChecksum" type="boolean"/>
- </method>
- <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This optional operation is not yet supported.]]>
- </doc>
- </method>
- <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="overwrite" type="boolean"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rename files/dirs]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get rid of Path f, whether a true file or dir.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Requires a boolean check to delete a non-empty
- directory recursively.]]>
- </doc>
- </method>
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setQuota"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="namespaceQuota" type="long"/>
- <param name="diskspaceQuota" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set a directory's quotas
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setQuota(String, long, long)]]>
- </doc>
- </method>
- <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getClient" return="org.apache.hadoop.hdfs.DFSClient"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDiskStatus" return="org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the disk usage of the filesystem, including total capacity,
- used space, and remaining space]]>
- </doc>
- </method>
- <method name="getRawCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total raw capacity of the filesystem, disregarding
- replication.]]>
- </doc>
- </method>
- <method name="getRawUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total raw used space in the filesystem, disregarding
- replication.]]>
- </doc>
- </method>
- <method name="getMissingBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with no good replicas left. Normally should be
- zero.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getUnderReplicatedBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with one or more replicas missing.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCorruptBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns count of blocks with at least one replica marked corrupt.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getDataNodeStats" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return statistics for each datanode.]]>
- </doc>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Enter, leave or get safe mode.
-
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#setSafeMode(
- FSConstants.SafeModeAction)]]>
- </doc>
- </method>
- <method name="saveNamespace"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Save namespace image.
-
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refreshes the list of hosts and excluded hosts from the configured
- files.]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Finalize previously upgraded file system state.
- @throws IOException]]>
- </doc>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="pathname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="reportChecksumFailure" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="in" type="org.apache.hadoop.fs.FSDataInputStream"/>
- <param name="inPos" type="long"/>
- <param name="sums" type="org.apache.hadoop.fs.FSDataInputStream"/>
- <param name="sumsPos" type="long"/>
- <doc>
- <![CDATA[We need to find the blocks that didn't match. Likely only one
- is corrupt but we will report both to the namenode. In the future,
- we can consider figuring out exactly which block is corrupt.]]>
- </doc>
- </method>
- <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the stat information about the file.
- @throws FileNotFoundException if the file does not exist.]]>
- </doc>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.MD5MD5CRC32FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="p" type="org.apache.hadoop.fs.Path"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Implementation of the abstract FileSystem for the DFS system.
- This object is the way end-user code interacts with a Hadoop
- DistributedFileSystem.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem -->
- <!-- start class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
- <class name="DistributedFileSystem.DiskStatus" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DistributedFileSystem.DiskStatus" type="long, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.DistributedFileSystem.DiskStatus -->
- <!-- start class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
- <class name="HDFSPolicyProvider" extends="org.apache.hadoop.security.authorize.PolicyProvider"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HDFSPolicyProvider"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getServices" return="org.apache.hadoop.security.authorize.Service[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[{@link PolicyProvider} for HDFS protocols.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HDFSPolicyProvider -->
- <!-- start class org.apache.hadoop.hdfs.HftpFileSystem -->
- <class name="HftpFileSystem" extends="org.apache.hadoop.fs.FileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HftpFileSystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.net.URI"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="pickOneAddress" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="hostname" type="java.lang.String"/>
- <exception name="UnknownHostException" type="java.net.UnknownHostException"/>
- <doc>
- <![CDATA[Randomly pick one from all available IP addresses of a given hostname.]]>
- </doc>
- </method>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="openConnection" return="java.net.HttpURLConnection"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="query" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Open an HTTP connection to the namenode to read file data and metadata.
- @param path The path component of the URL
- @param query The query component of the URL]]>
- </doc>
- </method>
- <method name="open" return="org.apache.hadoop.fs.FSDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="buffersize" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="listStatus" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileStatus" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileChecksum" return="org.apache.hadoop.fs.FileChecksum"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getWorkingDirectory" return="org.apache.hadoop.fs.Path"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setWorkingDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- </method>
- <method name="append" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="bufferSize" type="int"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This optional operation is not yet supported.]]>
- </doc>
- </method>
- <method name="create" return="org.apache.hadoop.fs.FSDataOutputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="overwrite" type="boolean"/>
- <param name="bufferSize" type="int"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <param name="progress" type="org.apache.hadoop.util.Progressable"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="org.apache.hadoop.fs.Path"/>
- <param name="dst" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="org.apache.hadoop.fs.Path"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="nnAddr" type="java.net.InetSocketAddress"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="ugi" type="org.apache.hadoop.security.UserGroupInformation"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="ran" type="java.util.Random"
- transient="false" volatile="false"
- static="false" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="df" type="java.text.SimpleDateFormat"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[An implementation of a protocol for accessing filesystems over HTTP.
- The following implementation provides a limited, read-only interface
- to a filesystem over HTTP.
- @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
- @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HftpFileSystem -->
- <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem -->
- <class name="HsftpFileSystem" extends="org.apache.hadoop.hdfs.HftpFileSystem"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="HsftpFileSystem"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="initialize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.net.URI"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="openConnection" return="java.net.HttpURLConnection"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="query" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[An implementation of a protocol for accessing filesystems over HTTPS.
- The following implementation provides a limited, read-only interface
- to a filesystem over HTTPS.
- @see org.apache.hadoop.hdfs.server.namenode.ListPathsServlet
- @see org.apache.hadoop.hdfs.server.namenode.FileDataServlet]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem -->
- <!-- start class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
- <class name="HsftpFileSystem.DummyHostnameVerifier" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <implements name="javax.net.ssl.HostnameVerifier"/>
- <constructor name="HsftpFileSystem.DummyHostnameVerifier"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="verify" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="hostname" type="java.lang.String"/>
- <param name="session" type="javax.net.ssl.SSLSession"/>
- </method>
- <doc>
- <![CDATA[Dummy hostname verifier that is used to bypass hostname checking]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.HsftpFileSystem.DummyHostnameVerifier -->
- <doc>
- <![CDATA[<p>A distributed implementation of {@link
- org.apache.hadoop.fs.FileSystem}. This is loosely modelled after
- Google's <a href="http://labs.google.com/papers/gfs.html">GFS</a>.</p>
- <p>The most important difference is that unlike GFS, Hadoop DFS files
- have strictly one writer at any one time. Bytes are always appended
- to the end of the writer's stream. There is no notion of "record appends"
- or "mutations" that are then checked or reordered. Writers simply emit
- a byte stream. That byte stream is guaranteed to be stored in the
- order written.</p>]]>
- </doc>
- </package>
- <package name="org.apache.hadoop.hdfs.protocol">
- <!-- start class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
- <class name="AlreadyBeingCreatedException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="AlreadyBeingCreatedException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The exception thrown when you ask to create a file that is already
- being created, but is not yet closed.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.AlreadyBeingCreatedException -->
- <!-- start class org.apache.hadoop.hdfs.protocol.Block -->
- <class name="Block" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <implements name="java.lang.Comparable"/>
- <constructor name="Block"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="long, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="org.apache.hadoop.hdfs.protocol.Block"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Block" type="java.io.File, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Find the blockid from the given filename]]>
- </doc>
- </constructor>
- <method name="isBlockFilename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="f" type="java.io.File"/>
- </method>
- <method name="set"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blkid" type="long"/>
- <param name="len" type="long"/>
- <param name="genStamp" type="long"/>
- </method>
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setBlockId"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="bid" type="long"/>
- </method>
- <method name="getBlockName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getNumBytes" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setNumBytes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="len" type="long"/>
- </method>
- <method name="getGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setGenerationStamp"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="stamp" type="long"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="GRANDFATHER_GENERATION_STAMP" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[A Block is a Hadoop FS primitive, identified by a
- long.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.Block -->
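- <!-- Illustrative sketch (not part of the generated API record): constructing a
-      Block from the (id, length, generationStamp) constructor above and
-      round-tripping it through the Writable methods. The buffer classes are
-      org.apache.hadoop.io helpers; the numeric values are made up.
-
-        Block blk = new Block(1073741825L, 67108864L, 1001L);
-        DataOutputBuffer out = new DataOutputBuffer();
-        blk.write(out);                                  // Writable serialization
-        DataInputBuffer in = new DataInputBuffer();
-        in.reset(out.getData(), out.getLength());
-        Block copy = new Block();
-        copy.readFields(in);                             // copy.equals(blk) holds
- -->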
- <!-- start class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
- <class name="BlockListAsLongs" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockListAsLongs" type="long[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor
- @param iBlockList - the long[] from which this BlockListAsLongs is created]]>
- </doc>
- </constructor>
- <method name="convertToArrayLongs" return="long[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockArray" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <doc>
- <![CDATA[Convert a Block[] to a long[].
- @param blockArray - the input Block[]
- @return the resulting long[]]]>
- </doc>
- </method>
- <method name="getNumberOfBlocks" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The number of blocks
- @return - the number of blocks]]>
- </doc>
- </method>
- <method name="getBlockId" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[The block id of the index-th block
- @param index - the block whose block id is desired
- @return the block id]]>
- </doc>
- </method>
- <method name="getBlockLen" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[The block length of the index-th block
- @param index - the block whose length is desired
- @return - the block length]]>
- </doc>
- </method>
- <method name="getBlockGenStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[The generation stamp of the index-th block
- @param index - the block whose generation stamp is desired
- @return - the generation stamp]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class provides an interface for accessing a list of blocks that
- has been implemented as a long[].
- It is useful for block reports: rather than sending a Block[],
- the report can be sent as a long[].]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.BlockListAsLongs -->
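- <!-- Illustrative sketch (assumed caller, not part of the generated record):
-      packing a Block[] into the long[] wire form used for block reports and
-      reading it back through the accessors listed above.
-
-        Block[] report = new Block[] { new Block(1L, 1024L, 1000L) };
-        long[] packed = BlockListAsLongs.convertToArrayLongs(report);
-        BlockListAsLongs list = new BlockListAsLongs(packed);
-        for (int i = 0; i < list.getNumberOfBlocks(); i++) {
-          long id  = list.getBlockId(i);
-          long len = list.getBlockLen(i);
-          long gs  = list.getBlockGenStamp(i);
-        }
- -->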
- <!-- start interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
- <interface name="ClientDatanodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="recoverBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="keepLength" type="boolean"/>
- <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Start generation-stamp recovery for the specified block.
- @param block the specified block
- @param keepLength keep the block length
- @param targets the list of possible locations of the specified block
- @return the located block if recovery succeeds and the generation stamp
- was updated as part of the recovery, or null if the block has
- no data and was deleted.
- @throws IOException]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[3: add keepLength parameter.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[A client-datanode protocol for block recovery.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
- <interface name="ClientProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="offset" type="long"/>
- <param name="length" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get locations of the blocks of the specified file within the specified range.
- <p>
- Return {@link LocatedBlocks} which contains
- the file length, the blocks and their locations.
- DataNode locations for each block are sorted by
- their proximity to the client's address.
- <p>
- The client will then have to contact
- one of the indicated DataNodes to obtain the actual data.
-
- @param src file name
- @param offset range start offset
- @param length range length
- @return file length and array of blocks with their locations
- @throws IOException]]>
- </doc>
- </method>
- <method name="create"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a new file entry in the namespace.
- <p>
- This will create an empty file specified by the source path.
- The path should reflect a full path originated at the root.
- The name-node does not have a notion of "current" directory for a client.
- <p>
- Once created, the file is visible and available for read to other clients.
- However, other clients cannot {@link #delete(String)}, re-create or
- {@link #rename(String, String)} it until the file is completed
- or its lease expires.
- <p>
- Blocks have a maximum size. Clients that intend to
- create multi-block files must also use {@link #addBlock(String, String)}.
- @param src path of the file being created.
- @param masked masked permission.
- @param clientName name of the current client.
- @param overwrite indicates whether the file should be
- overwritten if it already exists.
- @param replication block replication factor.
- @param blockSize maximum block size.
-
- @throws AccessControlException if permission to create file is
- denied by the system. As usual, on the client side the exception will
- be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
- @throws QuotaExceededException if the file creation violates
- any quota restriction
- @throws IOException if other errors occur.]]>
- </doc>
- </method>
- <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Append to the end of the file.
- Appending is allowed only if the server is configured with the
- parameter dfs.support.append set to true; otherwise an IOException
- is thrown.
- @param src path of the file being appended to.
- @param clientName name of the current client.
- @return information about the last partial block if any.
- @throws AccessControlException if permission to append to the file is
- denied by the system. As usual, on the client side the exception will
- be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
- @throws IOException if other errors occur.]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set replication for an existing file.
- <p>
- The NameNode sets replication to the new value and returns.
- The actual block replication is not expected to be performed during
- this method call. The blocks will be populated or removed in the
- background as the result of the routine block maintenance procedures.
-
- @param src file name
- @param replication new replication
- @throws IOException
- @return true if successful;
- false if file does not exist or is a directory]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set permissions for an existing file/directory.]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set owner of a path (i.e. a file or a directory).
- The parameters username and groupname cannot both be null.
- @param src
- @param username If it is null, the original username remains unchanged.
- @param groupname If it is null, the original groupname remains unchanged.]]>
- </doc>
- </method>
- <method name="abandonBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client can give up on a block by calling abandonBlock().
- The client can then
- either obtain a new block, or complete or abandon the file.
- Any partial writes to the block will be discarded.]]>
- </doc>
- </method>
- <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[A client that wants to write an additional block to the
- indicated filename (which must currently be open for writing)
- should call addBlock().
- addBlock() allocates a new block, together with the datanodes that the
- block data should be replicated to.
-
- @return LocatedBlock allocated block information.]]>
- </doc>
- </method>
- <method name="complete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client is done writing data to the given filename, and would
- like to complete it.
- The function returns whether the file has been closed successfully.
- If the function returns false, the caller should try again.
- A call to complete() will not return true until all the file's
- blocks have been replicated the minimum number of times. Thus,
- DataNode failures may cause a client to call complete() several
- times before succeeding.]]>
- </doc>
- </method>
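- <!-- Illustrative sketch (namenode is an assumed ClientProtocol proxy; the data
-      transfer to the datanodes is omitted): the create / addBlock / complete
-      sequence described above, retrying complete() until the namenode reports
-      that the file is closed. Replication and block size values are made up.
-
-        namenode.create(src, FsPermission.getDefault(), clientName,
-                        true, (short) 3, 64L * 1024 * 1024);
-        LocatedBlock blk = namenode.addBlock(src, clientName);
-        // ... stream the block data to blk.getLocations() ...
-        while (!namenode.complete(src, clientName)) {
-          Thread.sleep(400);   // minimum replication not reached yet; retry
-        }
- -->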
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client wants to report corrupted blocks (blocks with specified
- locations on datanodes).
- @param blocks Array of located blocks to report]]>
- </doc>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rename an item in the file system namespace.
-
- @param src existing file or directory name.
- @param dst new name.
- @return true if successful, or false if the old name does not exist
- or if the new name already belongs to the namespace.
- @throws IOException if the new name is invalid.
- @throws QuotaExceededException if the rename would violate
- any quota restriction]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Delete the given file or directory from the file system.
- <p>
- Any blocks belonging to the deleted files will be garbage-collected.
-
- @param src existing name.
- @return true only if the existing file or directory was actually removed
- from the file system.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Delete the given file or directory from the file system.
- <p>
- Same as delete, but provides a way to avoid accidentally
- deleting non-empty directories programmatically.
- @param src existing name
- @param recursive if true, deletes a non-empty directory recursively,
- else throws an exception.
- @return true only if the existing file or directory was actually removed
- from the file system.]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a directory (or hierarchy of directories) with the given
- name and permission.
- @param src The path of the directory being created
- @param masked The masked permission of the directory being created
- @return True if the operation succeeds.
- @throws {@link AccessControlException} if permission to create the directory is
- denied by the system. As usual, on the client side the exception will
- be wrapped into {@link org.apache.hadoop.ipc.RemoteException}.
- @throws QuotaExceededException if the operation would violate
- any quota restriction.]]>
- </doc>
- </method>
- <method name="getListing" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a listing of the indicated directory]]>
- </doc>
- </method>
- <method name="renewLease"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Client programs can cause stateful changes in the NameNode
- that affect other clients. A client may obtain a file and
- neither abandon nor complete it. A client might hold a series
- of locks that prevent other clients from proceeding.
- Clearly, it would be bad if a client held a bunch of locks
- that it never gave up. This can happen easily if the client
- dies unexpectedly.
- <p>
- So, the NameNode will revoke the locks and live file-creates
- for clients that it thinks have died. A client tells the
- NameNode that it is still alive by periodically calling
- renewLease(). If a certain amount of time passes since
- the last call to renewLease(), the NameNode assumes the
- client has died.]]>
- </doc>
- </method>
- <method name="getStats" return="long[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a set of statistics about the filesystem.
- The following values are currently returned.
- <ul>
- <li> [0] contains the total storage capacity of the system, in bytes.</li>
- <li> [1] contains the total used space of the system, in bytes.</li>
- <li> [2] contains the available storage of the system, in bytes.</li>
- <li> [3] contains number of under replicated blocks in the system.</li>
- <li> [4] contains number of blocks with a corrupt replica. </li>
- <li> [5] contains number of blocks without any good replicas left. </li>
- </ul>
- Use public constants like {@link #GET_STATS_CAPACITY_IDX} in place of
- actual numbers to index into the array.]]>
- </doc>
- </method>
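- <!-- Illustrative sketch (namenode is an assumed ClientProtocol proxy): indexing
-      the statistics array with the public constants declared below rather than
-      raw numbers, as the javadoc above recommends.
-
-        long[] stats = namenode.getStats();
-        long capacity  = stats[ClientProtocol.GET_STATS_CAPACITY_IDX];
-        long used      = stats[ClientProtocol.GET_STATS_USED_IDX];
-        long remaining = stats[ClientProtocol.GET_STATS_REMAINING_IDX];
- -->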
- <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a report on the system's current datanodes.
- One DatanodeInfo object is returned for each DataNode.
- Return live datanodes if type is LIVE; dead datanodes if type is DEAD;
- otherwise all datanodes if type is ALL.]]>
- </doc>
- </method>
- <method name="getPreferredBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the block size for the given file.
- @param filename The name of the file
- @return The number of bytes in each block
- @throws IOException]]>
- </doc>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Enter, leave or get safe mode.
- <p>
- Safe mode is a name node state when it
- <ol><li>does not accept changes to name space (read-only), and</li>
- <li>does not replicate or delete blocks.</li></ol>
-
- <p>
- Safe mode is entered automatically at name node startup.
- Safe mode can also be entered manually using
- {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}.
- <p>
- At startup the name node accepts data node reports, collecting
- information about block locations.
- In order to leave safe mode it needs a configurable percentage,
- called the threshold, of blocks to satisfy the minimal
- replication condition.
- The minimal replication condition is that each block must have at least
- <tt>dfs.replication.min</tt> replicas.
- When the threshold is reached the name node extends safe mode
- for a configurable amount of time
- to let the remaining data nodes check in before it
- starts replicating missing blocks.
- Then the name node leaves safe mode.
- <p>
- If safe mode is turned on manually using
- {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_ENTER)}
- then the name node stays in safe mode until it is manually turned off
- using {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_LEAVE)}.
- Current state of the name node can be verified using
- {@link #setSafeMode(FSConstants.SafeModeAction) setSafeMode(SafeModeAction.SAFEMODE_GET)}
- <h4>Configuration parameters:</h4>
- <tt>dfs.safemode.threshold.pct</tt> is the threshold parameter.<br>
- <tt>dfs.safemode.extension</tt> is the safe mode extension parameter.<br>
- <tt>dfs.replication.min</tt> is the minimal replication parameter.
-
- <h4>Special cases:</h4>
- The name node does not enter safe mode at startup if the threshold is
- set to 0 or if the name space is empty.<br>
- If the threshold is set to 1 then all blocks need to have at least
- minimal replication.<br>
- If the threshold value is greater than 1 then the name node will not be
- able to turn off safe mode automatically.<br>
- Safe mode can always be turned off manually.
-
- @param action <ul> <li>0 leave safe mode;</li>
- <li>1 enter safe mode;</li>
- <li>2 get safe mode state.</li></ul>
- @return <ul><li>0 if the safe mode is OFF or</li>
- <li>1 if the safe mode is ON.</li></ul>
- @throws IOException]]>
- </doc>
- </method>
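- <!-- Illustrative sketch (namenode is an assumed ClientProtocol proxy): the three
-      SafeModeAction values mapped to the query / enter / leave behaviour
-      described above.
-
-        boolean on = namenode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
-        namenode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_ENTER);
-        namenode.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_LEAVE);
- -->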
- <method name="saveNamespace"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Save namespace image.
- <p>
- Saves the current namespace into storage directories and resets the edits log.
- Requires superuser privilege and safe mode.
-
- @throws AccessControlException if the superuser privilege is violated.
- @throws IOException if image creation failed.]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Tells the namenode to reread the hosts and exclude files.
- @throws IOException]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Finalize previous upgrade.
- Remove file system state saved during the upgrade.
- The upgrade will become irreversible.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Report distributed upgrade progress or force current upgrade to proceed.
-
- @param action {@link FSConstants.UpgradeAction} to perform
- @return upgrade status information or null if no upgrades are in progress
- @throws IOException]]>
- </doc>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps namenode data structures into the specified file. If the file
- already exists, the output is appended to it.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the file info for a specific file or directory.
- @param src The string representation of the path to the file
- @throws IOException if permission to access file is denied by the system
- @return object containing information regarding the file
- or null if file not found]]>
- </doc>
- </method>
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get {@link ContentSummary} rooted at the specified directory.
- @param path The string representation of the path]]>
- </doc>
- </method>
- <method name="setQuota"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="namespaceQuota" type="long"/>
- <param name="diskspaceQuota" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set the quota for a directory.
- @param path The string representation of the path to the directory
- @param namespaceQuota Limit on the number of names in the tree rooted
- at the directory
- @param diskspaceQuota Limit on the disk space occupied by all the files under
- this directory.
- <br><br>
-
- The quota can have three types of values: (1) 0 or more will set
- the quota to that value, (2) {@link FSConstants#QUOTA_DONT_SET} implies
- the quota will not be changed, and (3) {@link FSConstants#QUOTA_RESET}
- implies the quota will be reset. Any other value is a runtime error.
-
- @throws FileNotFoundException if the path is a file or
- does not exist
- @throws QuotaExceededException if the directory size
- is greater than the given quota]]>
- </doc>
- </method>
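- <!-- Illustrative sketch (namenode is an assumed ClientProtocol proxy; the path
-      is made up): the three quota value cases described above.
-
-        namenode.setQuota("/user/alice", 10000L, FSConstants.QUOTA_DONT_SET);      // names only
-        namenode.setQuota("/user/alice", FSConstants.QUOTA_DONT_SET, 10L << 30);   // disk space only
-        namenode.setQuota("/user/alice", FSConstants.QUOTA_RESET, FSConstants.QUOTA_RESET); // clear both
- -->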
- <method name="fsync"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="client" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write all metadata for this file into persistent storage.
- The file must be currently open for writing.
- @param src The string representation of the path
- @param client The string representation of the client]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Sets the modification and access time of the file to the specified time.
- @param src The string representation of the path
- @param mtime The number of milliseconds since Jan 1, 1970.
- Setting mtime to -1 means that modification time should not be set
- by this call.
- @param atime The number of milliseconds since Jan 1, 1970.
- Setting atime to -1 means that access time should not be set
- by this call.]]>
- </doc>
- </method>
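- <!-- Illustrative sketch (namenode is an assumed ClientProtocol proxy; the path
-      is made up): updating only the access time by passing -1 for mtime, per
-      the parameter description above.
-
-        long now = System.currentTimeMillis();
-        namenode.setTimes("/user/alice/data.txt", -1L, now);   // mtime left unchanged
- -->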
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Compared to the previous version the following changes have been introduced:
- (Only the latest change is reflected.
- The log of historical changes can be retrieved from the svn).
- 41: saveNamespace introduced.]]>
- </doc>
- </field>
- <field name="GET_STATS_CAPACITY_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_USED_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_REMAINING_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_UNDER_REPLICATED_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_CORRUPT_BLOCKS_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="GET_STATS_MISSING_BLOCKS_IDX" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[ClientProtocol is used by user code via
- {@link org.apache.hadoop.hdfs.DistributedFileSystem} class to communicate
- with the NameNode. User code can manipulate the directory namespace,
- as well as open/close file streams, etc.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.ClientProtocol -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeID -->
- <class name="DatanodeID" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.WritableComparable"/>
- <constructor name="DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Equivalent to DatanodeID("").]]>
- </doc>
- </constructor>
- <constructor name="DatanodeID" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Equivalent to DatanodeID(nodeName, "", -1, -1).]]>
- </doc>
- </constructor>
- <constructor name="DatanodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeID copy constructor
-
- @param from]]>
- </doc>
- </constructor>
- <constructor name="DatanodeID" type="java.lang.String, java.lang.String, int, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create DatanodeID
- @param nodeName (hostname:portNumber)
- @param storageID data storage ID
- @param infoPort info server port
- @param ipcPort ipc server port]]>
- </doc>
- </constructor>
- <method name="getName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return hostname:portNumber.]]>
- </doc>
- </method>
- <method name="getStorageID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return data storage ID.]]>
- </doc>
- </method>
- <method name="getInfoPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return infoPort (the port to which the HTTP server is bound)]]>
- </doc>
- </method>
- <method name="getIpcPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return ipcPort (the port to which the IPC server is bound)]]>
- </doc>
- </method>
- <method name="setStorageID"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="storageID" type="java.lang.String"/>
- <doc>
- <![CDATA[Sets the data storage ID.]]>
- </doc>
- </method>
- <method name="getHost" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return the hostname, without the :portNumber.]]>
- </doc>
- </method>
- <method name="getPort" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="to" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="updateRegInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <doc>
- <![CDATA[Update fields when a new registration request comes in.
- Note that this does not update storageID.]]>
- </doc>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="that" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <doc>
- <![CDATA[Comparable.
- The basis of comparison is the String name (host:portNumber) only.
- @param that
- @return as specified by Comparable.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="EMPTY_ARRAY" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="name" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="storageID" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="infoPort" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="ipcPort" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeID is composed of the data node
- name (hostname:portNumber) and the data storage ID,
- which it currently represents.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeID -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
- <class name="DatanodeInfo" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.net.Node"/>
- <constructor name="DatanodeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="DatanodeInfo" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="getCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The raw capacity.]]>
- </doc>
- </method>
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The used space by the data node.]]>
- </doc>
- </method>
- <method name="getNonDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The non-DFS space used by the data node.]]>
- </doc>
- </method>
- <method name="getDfsUsedPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The used space by the data node as percentage of present capacity]]>
- </doc>
- </method>
- <method name="getRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The raw free space.]]>
- </doc>
- </method>
- <method name="getRemainingPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The remaining space as percentage of configured capacity.]]>
- </doc>
- </method>
- <method name="getLastUpdate" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The time when this information was accurate.]]>
- </doc>
- </method>
- <method name="getXceiverCount" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[number of active connections]]>
- </doc>
- </method>
- <method name="setCapacity"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="capacity" type="long"/>
- <doc>
- <![CDATA[Sets raw capacity.]]>
- </doc>
- </method>
- <method name="setRemaining"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="remaining" type="long"/>
- <doc>
- <![CDATA[Sets raw free space.]]>
- </doc>
- </method>
- <method name="setLastUpdate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="lastUpdate" type="long"/>
- <doc>
- <![CDATA[Sets time when this information was accurate.]]>
- </doc>
- </method>
- <method name="setXceiverCount"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="xceiverCount" type="int"/>
- <doc>
- <![CDATA[Sets number of active connections]]>
- </doc>
- </method>
- <method name="getNetworkLocation" return="java.lang.String"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[rack name]]>
- </doc>
- </method>
- <method name="setNetworkLocation"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="location" type="java.lang.String"/>
- <doc>
- <![CDATA[Sets the rack name]]>
- </doc>
- </method>
- <method name="getHostName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setHostName"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="host" type="java.lang.String"/>
- </method>
- <method name="getDatanodeReport" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[A formatted string for reporting the status of the DataNode.]]>
- </doc>
- </method>
- <method name="dumpDatanode" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[A formatted string for printing the status of the DataNode.]]>
- </doc>
- </method>
- <method name="startDecommission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Start decommissioning a node.]]>
- </doc>
- </method>
- <method name="stopDecommission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Stop decommissioning a node.]]>
- </doc>
- </method>
- <method name="isDecommissionInProgress" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns true if the node is in the process of being decommissioned]]>
- </doc>
- </method>
- <method name="isDecommissioned" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns true if the node has been decommissioned.]]>
- </doc>
- </method>
- <method name="setDecommissioned"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Sets the admin state to indicate that decommission is complete.]]>
- </doc>
- </method>
- <method name="setAdminState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="newState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"/>
- <doc>
- <![CDATA[Sets the admin state of this node.]]>
- </doc>
- </method>
- <method name="getParent" return="org.apache.hadoop.net.Node"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return this node's parent]]>
- </doc>
- </method>
- <method name="setParent"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="parent" type="org.apache.hadoop.net.Node"/>
- </method>
- <method name="getLevel" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return this node's level in the tree.
- E.g. the root of a tree returns 0 and its children return 1]]>
- </doc>
- </method>
- <method name="setLevel"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="level" type="int"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="capacity" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="dfsUsed" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="remaining" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="lastUpdate" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="xceiverCount" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="location" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="hostName" type="java.lang.String"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[HostName as supplied by the datanode during registration as its
- name. The namenode uses the datanode IP address as the name.]]>
- </doc>
- </field>
- <field name="adminState" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeInfo represents the status of a DataNode.
- This object is used for communication in the
- Datanode Protocol and the Client Protocol.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo -->
- <!-- start class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
- <class name="DatanodeInfo.AdminStates" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="NORMAL" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DECOMMISSION_INPROGRESS" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DECOMMISSIONED" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.DatanodeInfo.AdminStates -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
- <interface name="DataTransferProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="DATA_TRANSFER_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Version for data transfers between clients and datanodes.
- This should change whenever the serialization of DatanodeInfo changes,
- not just when the protocol changes; this is easy to overlook.]]>
- </doc>
- </field>
- <field name="OP_WRITE_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_READ_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_READ_METADATA" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_REPLACE_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_COPY_BLOCK" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_BLOCK_CHECKSUM" type="byte"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_STATUS_SUCCESS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_STATUS_ERROR" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_STATUS_ERROR_CHECKSUM" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_STATUS_ERROR_INVALID" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_STATUS_ERROR_EXISTS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="OP_STATUS_CHECKSUM_OK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[The client transfers data to/from the datanode using a streaming protocol.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.DataTransferProtocol -->
- <!-- start interface org.apache.hadoop.hdfs.protocol.FSConstants -->
- <interface name="FSConstants" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="MIN_BLOCKS_FOR_WRITE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCK_INVALIDATE_CHUNK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="QUOTA_DONT_SET" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="QUOTA_RESET" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HEARTBEAT_INTERVAL" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCKREPORT_INTERVAL" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BLOCKREPORT_INITIAL_DELAY" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LEASE_SOFTLIMIT_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LEASE_HARDLIMIT_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LEASE_RECOVER_PERIOD" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="MAX_PATH_LENGTH" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="MAX_PATH_DEPTH" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="BUFFER_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SMALL_BUFFER_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_BLOCK_SIZE" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEFAULT_DATA_SOCKET_SIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SIZE_OF_INTEGER" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Some handy constants]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.protocol.FSConstants -->
- <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
- <class name="FSConstants.DatanodeReportType" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="ALL" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LIVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DEAD" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType -->
- <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
- <class name="FSConstants.SafeModeAction" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="SAFEMODE_LEAVE" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SAFEMODE_ENTER" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="SAFEMODE_GET" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction -->
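- <!-- Illustrative usage sketch: querying safe mode with SAFEMODE_GET. It assumes a
- DistributedFileSystem handle obtained from the configuration and that it exposes
- setSafeMode(FSConstants.SafeModeAction), as the admin tooling of this release line
- does; SAFEMODE_ENTER and SAFEMODE_LEAVE would change the state instead of querying it.
-
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.fs.FileSystem;
- import org.apache.hadoop.hdfs.DistributedFileSystem;
- import org.apache.hadoop.hdfs.protocol.FSConstants;
-
- public class SafeModeCheck {
-   public static void main(String[] args) throws Exception {
-     FileSystem fs = FileSystem.get(new Configuration());
-     if (fs instanceof DistributedFileSystem) {
-       DistributedFileSystem dfs = (DistributedFileSystem) fs;
-       // SAFEMODE_GET only reports whether the name node is in safe mode.
-       boolean inSafeMode = dfs.setSafeMode(FSConstants.SafeModeAction.SAFEMODE_GET);
-       System.out.println("Name node in safe mode: " + inSafeMode);
-     }
-   }
- }
- -->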
- <!-- start class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
- <class name="FSConstants.UpgradeAction" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="GET_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DETAILED_STATUS" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FORCE_PROCEED" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Distributed upgrade actions:
-
- 1. Get upgrade status.
- 2. Get detailed upgrade status.
- 3. Proceed with the upgrade if it is stuck, no matter what the status is.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction -->
- <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
- <class name="LocatedBlock" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="LocatedBlock"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="LocatedBlock" type="org.apache.hadoop.hdfs.protocol.Block, org.apache.hadoop.hdfs.protocol.DatanodeInfo[], long, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getLocations" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStartOffset" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isCorrupt" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[A LocatedBlock is a pair of Block, DatanodeInfo[]
- objects. It tells where to find a Block.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlock -->
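- <!-- Illustrative usage sketch: reading the accessors listed above. The LocatedBlock
- instance is assumed to come from a client call not shown here, and getName() on
- DatanodeInfo (inherited from DatanodeID) is assumed to return the host:port string.
-
- import org.apache.hadoop.hdfs.protocol.DatanodeInfo;
- import org.apache.hadoop.hdfs.protocol.LocatedBlock;
-
- class LocatedBlockUtil {
-   // Hypothetical helper: summarize one located block for logging.
-   static String describe(LocatedBlock lb) {
-     StringBuilder sb = new StringBuilder();
-     sb.append("block=").append(lb.getBlock())
-       .append(" offset=").append(lb.getStartOffset())
-       .append(" size=").append(lb.getBlockSize())
-       .append(" corrupt=").append(lb.isCorrupt())
-       .append(" locations=");
-     for (DatanodeInfo dn : lb.getLocations()) {
-       sb.append(dn.getName()).append(' ');
-     }
-     return sb.toString();
-   }
- }
- -->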
- <!-- start class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
- <class name="LocatedBlocks" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="LocatedBlocks" type="long, java.util.List, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getLocatedBlocks" return="java.util.List"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get located blocks.]]>
- </doc>
- </method>
- <method name="get" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="index" type="int"/>
- <doc>
- <![CDATA[Get located block.]]>
- </doc>
- </method>
- <method name="locatedBlockCount" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get number of located blocks.]]>
- </doc>
- </method>
- <method name="getFileLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isUnderConstruction" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return true if the file was under construction when
- this LocatedBlocks was constructed, false otherwise.]]>
- </doc>
- </method>
- <method name="findBlock" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="offset" type="long"/>
- <doc>
- <![CDATA[Find the block containing the specified offset.
-
- @return the index of that block if found; otherwise a negative
- binary-search result (see {@link #getInsertIndex(int)}).]]>
- </doc>
- </method>
- <method name="insertRange"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockIdx" type="int"/>
- <param name="newBlocks" type="java.util.List"/>
- </method>
- <method name="getInsertIndex" return="int"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="binSearchResult" type="int"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Collection of blocks with their locations and the file length.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.LocatedBlocks -->
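- <!-- Illustrative usage sketch: finding the block that covers a byte offset. It assumes
- findBlock returns the index of the containing block, with a negative binary-search
- result when no block covers the offset, as suggested by the companion
- getInsertIndex(int) method; the helper name blockAt is hypothetical.
-
- import org.apache.hadoop.hdfs.protocol.LocatedBlock;
- import org.apache.hadoop.hdfs.protocol.LocatedBlocks;
-
- class LocatedBlocksUtil {
-   static LocatedBlock blockAt(LocatedBlocks blocks, long offset) {
-     if (offset < 0 || offset >= blocks.getFileLength()) {
-       return null;                      // outside the file
-     }
-     int idx = blocks.findBlock(offset);
-     return idx < 0 ? null : blocks.get(idx);
-   }
- }
- -->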
- <!-- start class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
- <class name="QuotaExceededException" extends="java.io.IOException"
- abstract="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <constructor name="QuotaExceededException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="QuotaExceededException" type="long, long, long, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="setPathName"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- </method>
- <method name="getMessage" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This exception is thrown when a modification to HDFS results in a violation
- of a directory quota. A directory quota may be a namespace quota (a limit
- on the number of files and directories) or a diskspace quota (a limit on the space
- taken by all the files under the directory tree). <br> <br>
-
- The message for the exception specifies the directory where the quota
- was violated and the actual quotas.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.QuotaExceededException -->
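- <!-- Illustrative usage sketch: surfacing quota violations separately from other I/O
- errors. Whether the client sees QuotaExceededException directly or wrapped in a
- RemoteException depends on the RPC layer; this sketch assumes the direct form, and
- the helper name createDir is hypothetical.
-
- import java.io.IOException;
- import org.apache.hadoop.fs.FileSystem;
- import org.apache.hadoop.fs.Path;
- import org.apache.hadoop.hdfs.protocol.QuotaExceededException;
-
- class QuotaAwareMkdir {
-   static void createDir(FileSystem fs, Path dir) throws IOException {
-     try {
-       fs.mkdirs(dir);
-     } catch (QuotaExceededException qee) {
-       // getMessage() names the directory whose quota was violated.
-       System.err.println("Quota violated: " + qee.getMessage());
-       throw qee;
-     }
-   }
- }
- -->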
- <!-- start class org.apache.hadoop.hdfs.protocol.UnregisteredDatanodeException -->
- <class name="UnregisteredDatanodeException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UnregisteredDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="UnregisteredDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID, org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when a datanode that has not previously
- registered is trying to access the name node.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.protocol.UnregisteredDatanodeException -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.balancer">
- <!-- start class org.apache.hadoop.hdfs.server.balancer.Balancer -->
- <class name="Balancer" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.util.Tool"/>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <doc>
- <![CDATA[Run a balancer
- @param args]]>
- </doc>
- </method>
- <method name="run" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Main method of the Balancer.
- @param args arguments to a Balancer
- @exception Exception if any exception occurs during datanode balancing]]>
- </doc>
- </method>
- <method name="getConf" return="org.apache.hadoop.conf.Configuration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[return this balancer's configuration]]>
- </doc>
- </method>
- <method name="setConf"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <doc>
- <![CDATA[set this balancer's configuration]]>
- </doc>
- </method>
- <field name="MAX_NUM_CONCURRENT_MOVES" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The maximum number of concurrent block moves
- at a datanode for balancing purposes]]>
- </doc>
- </field>
- <field name="SUCCESS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ALREADY_RUNNING" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NO_MOVE_BLOCK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NO_MOVE_PROGRESS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="IO_EXCEPTION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ILLEGAL_ARGS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[<p>The balancer is a tool that balances disk space usage on an HDFS cluster
- when some datanodes become full or when new empty nodes join the cluster.
- The tool is deployed as an application program that can be run by the
- cluster administrator on a live HDFS cluster while applications are
- adding and deleting files.
-
- <p>SYNOPSIS
- <pre>
- To start:
- bin/start-balancer.sh [-threshold <threshold>]
- Example: bin/start-balancer.sh
- start the balancer with a default threshold of 10%
- bin/start-balancer.sh -threshold 5
- start the balancer with a threshold of 5%
- To stop:
- bin/stop-balancer.sh
- </pre>
-
- <p>DESCRIPTION
- <p>The threshold parameter is a fraction in the range of (0%, 100%) with a
- default value of 10%. The threshold sets a target for whether the cluster
- is balanced. A cluster is balanced if for each datanode, the utilization
- of the node (ratio of used space at the node to total capacity of the node)
- differs from the utilization of the cluster (ratio of used space in the cluster
- to total capacity of the cluster) by no more than the threshold value.
- The smaller the threshold, the more balanced a cluster will become.
- It takes more time to run the balancer for small threshold values.
- Also for a very small threshold the cluster may not be able to reach the
- balanced state when applications write and delete files concurrently.
-
- <p>The tool moves blocks from highly utilized datanodes to poorly
- utilized datanodes iteratively. In each iteration a datanode moves or
- receives no more than the lesser of 10G bytes or the threshold fraction
- of its capacity. Each iteration runs no more than 20 minutes.
- At the end of each iteration, the balancer obtains updated datanodes
- information from the namenode.
-
- <p>A configuration property that limits the balancer's use of bandwidth is
- defined in the default configuration file:
- <pre>
- <property>
- <name>dfs.balance.bandwidthPerSec</name>
- <value>1048576</value>
- <description> Specifies the maximum bandwidth that each datanode
- can utilize for the balancing purpose, in terms of the number of bytes
- per second. </description>
- </property>
- </pre>
-
- <p>This property determines the maximum speed at which a block will be
- moved from one datanode to another. The default value is 1MB/s. The higher
- the bandwidth, the faster a cluster can reach the balanced state,
- but with greater competition with application processes. If an
- administrator changes the value of this property in the configuration
- file, the change is observed when HDFS is next restarted.
-
- <p>MONITORING BALANCER PROGRESS
- <p>After the balancer is started, an output file name where the balancer
- progress will be recorded is printed on the screen. The administrator
- can monitor the running of the balancer by reading the output file.
- The output shows the balancer's status iteration by iteration. In each
- iteration it prints the starting time, the iteration number, the total
- number of bytes that have been moved in the previous iterations,
- the total number of bytes that are left to move in order for the cluster
- to be balanced, and the number of bytes that are being moved in this
- iteration. Normally "Bytes Already Moved" is increasing while "Bytes Left
- To Move" is decreasing.
-
- <p>Running multiple instances of the balancer in an HDFS cluster is
- prohibited by the tool.
-
- <p>The balancer automatically exits when any of the following five
- conditions is satisfied:
- <ol>
- <li>The cluster is balanced;
- <li>No block can be moved;
- <li>No block has been moved for five consecutive iterations;
- <li>An IOException occurs while communicating with the namenode;
- <li>Another balancer is running.
- </ol>
-
- <p>Upon exit, a balancer returns an exit code and prints one of the
- following messages to the output file, corresponding to the above exit
- reasons:
- <ol>
- <li>The cluster is balanced. Exiting
- <li>No block can be moved. Exiting...
- <li>No block has been moved for 3 iterations. Exiting...
- <li>Received an IO exception: failure reason. Exiting...
- <li>Another balancer is running. Exiting...
- </ol>
-
- <p>The administrator can interrupt the execution of the balancer at any
- time by running the command "stop-balancer.sh" on the machine where the
- balancer is running.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.balancer.Balancer -->
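- <!-- Illustrative usage sketch: starting the balancer from Java instead of the shell
- scripts shown in the synopsis. It simply delegates to Balancer.main with the same
- "-threshold" argument the scripts accept; main is expected to terminate the JVM
- with one of the exit codes listed on the class (SUCCESS, ALREADY_RUNNING,
- NO_MOVE_BLOCK, NO_MOVE_PROGRESS, IO_EXCEPTION, ILLEGAL_ARGS).
-
- import org.apache.hadoop.hdfs.server.balancer.Balancer;
-
- public class RunBalancer {
-   public static void main(String[] args) {
-     // Equivalent to: bin/start-balancer.sh -threshold 5, but run in the foreground.
-     Balancer.main(new String[] { "-threshold", "5" });
-   }
- }
- -->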
- </package>
- <package name="org.apache.hadoop.hdfs.server.common">
- <!-- start class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
- <class name="GenerationStamp" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.WritableComparable"/>
- <constructor name="GenerationStamp"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create a new instance, initialized to FIRST_VALID_STAMP.]]>
- </doc>
- </constructor>
- <method name="getStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the current generation stamp]]>
- </doc>
- </method>
- <method name="setStamp"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="stamp" type="long"/>
- <doc>
- <![CDATA[Sets the current generation stamp]]>
- </doc>
- </method>
- <method name="nextStamp" return="long"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[First increments the counter and then returns the stamp]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="compare" return="int"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="x" type="long"/>
- <param name="y" type="long"/>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="that" type="org.apache.hadoop.hdfs.server.common.GenerationStamp"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="equalsWithWildcard" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="x" type="long"/>
- <param name="y" type="long"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="WILDCARD_STAMP" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FIRST_VALID_STAMP" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[A GenerationStamp is a Hadoop FS primitive, identified by a long.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.GenerationStamp -->
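- <!-- Illustrative usage sketch: exercising the stamp accessors listed above. It assumes
- the conventional compare contract (negative when the first argument is smaller).
-
- import org.apache.hadoop.hdfs.server.common.GenerationStamp;
-
- public class StampDemo {
-   public static void main(String[] args) {
-     GenerationStamp gs = new GenerationStamp();  // starts at FIRST_VALID_STAMP
-     long first = gs.getStamp();
-     long next = gs.nextStamp();                  // increments the counter, then returns it
-     // Expected to print a negative value, since first < next.
-     System.out.println(GenerationStamp.compare(first, next));
-   }
- }
- -->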
- <!-- start interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
- <interface name="HdfsConstants" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="READ_TIMEOUT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="WRITE_TIMEOUT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="WRITE_TIMEOUT_EXTENSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Some handy internal HDFS constants]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.common.HdfsConstants -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
- <class name="HdfsConstants.NodeType" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="NAME_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DATA_NODE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Type of the node]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType -->
- <!-- start class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
- <class name="HdfsConstants.StartupOption" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getName" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="FORMAT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="REGULAR" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="UPGRADE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ROLLBACK" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="IMPORT" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption -->
- <!-- start class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
- <class name="InconsistentFSStateException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="InconsistentFSStateException" type="java.io.File, java.lang.String, java.lang.Throwable"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The exception is thrown when the file system state is inconsistent
- and is not recoverable.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.InconsistentFSStateException -->
- <!-- start class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
- <class name="IncorrectVersionException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="IncorrectVersionException" type="int, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="IncorrectVersionException" type="int, java.lang.String, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The exception is thrown when an external version does not match
- the current version of the application.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.IncorrectVersionException -->
- <!-- start class org.apache.hadoop.hdfs.server.common.Storage -->
- <class name="Storage" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create empty storage info of the specified type]]>
- </doc>
- </constructor>
- <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, int, long"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Storage" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType, org.apache.hadoop.hdfs.server.common.StorageInfo"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="dirIterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the default iterator.
- This iterator returns all entries of storageDirs.]]>
- </doc>
- </method>
- <method name="dirIterator" return="java.util.Iterator"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dirType" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
- <doc>
- <![CDATA[Return an iterator based on storage directory type.
- This iterator selects entries of storageDirs of type dirType and returns
- them via the iterator.]]>
- </doc>
- </method>
- <method name="getNumStorageDirs" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStorageDir" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="idx" type="int"/>
- </method>
- <method name="addStorageDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- </method>
- <method name="isConversionNeeded" return="boolean"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="checkVersionUpgradable"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="oldVersion" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Checks if the upgrade from the given old version is supported. If
- no upgrade is supported, it throws IncorrectVersionException.
-
- @param oldVersion]]>
- </doc>
- </method>
- <method name="getFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get common storage fields.
- Should be overridden if additional fields need to be read.
-
- @param props
- @throws IOException]]>
- </doc>
- </method>
- <method name="setFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set common storage fields.
- Should be overridden if additional fields need to be set.
-
- @param props
- @throws IOException]]>
- </doc>
- </method>
- <method name="rename"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="from" type="java.io.File"/>
- <param name="to" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="deleteDir"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="dir" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="writeAll"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write all data storage files.
- @throws IOException]]>
- </doc>
- </method>
- <method name="unlockAll"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Unlock all storage directories.
- @throws IOException]]>
- </doc>
- </method>
- <method name="isLockSupported" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Check whether underlying file system supports file locking.
-
- @return <code>true</code> if exclusive locks are supported or
- <code>false</code> otherwise.
- @throws IOException
- @see StorageDirectory#lock()]]>
- </doc>
- </method>
- <method name="getBuildVersion" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="storage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
- </method>
- <method name="corruptPreUpgradeStorage"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="rootDir" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="writeCorruptedData"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="file" type="java.io.RandomAccessFile"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LAST_PRE_UPGRADE_LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="LAST_UPGRADABLE_LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LAST_UPGRADABLE_HADOOP_VERSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="PRE_GENERATIONSTAMP_LAYOUT_VERSION" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="STORAGE_FILE_VERSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="STORAGE_DIR_CURRENT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="storageDirs" type="java.util.List"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Storage information file.
- <p>
- Local storage information is stored in a separate file VERSION.
- It contains the type of the node,
- the storage layout version, the namespace id, and
- the fs state creation time.
- <p>
- Local storage can reside in multiple directories.
- Each directory should contain the same VERSION file as the others.
- During startup Hadoop servers (name-node and data-nodes) read their local
- storage information from them.
- <p>
- The servers hold a lock for each storage directory while they run, so that
- other nodes are not able to start up sharing the same storage.
- The locks are released when the servers stop (normally or abnormally).]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Storage -->
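- <!-- Illustrative usage sketch: walking the storage directories held by a concrete
- Storage subclass (Storage itself is abstract). dirIterator() returns a raw
- java.util.Iterator, hence the cast; the helper name listRoots is hypothetical.
-
- import java.util.Iterator;
- import org.apache.hadoop.hdfs.server.common.Storage;
- import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
-
- class StorageDirs {
-   static void listRoots(Storage storage) {
-     for (Iterator it = storage.dirIterator(); it.hasNext();) {
-       StorageDirectory sd = (StorageDirectory) it.next();
-       System.out.println(sd.getRoot() + " (current: " + sd.getCurrentDir() + ")");
-     }
-   }
- }
- -->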
- <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
- <class name="Storage.StorageDirectory" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="Storage.StorageDirectory" type="java.io.File"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="Storage.StorageDirectory" type="java.io.File, org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getRoot" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get root directory of this storage]]>
- </doc>
- </method>
- <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get storage directory type]]>
- </doc>
- </method>
- <method name="read"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Read version file.
-
- @throws IOException if file cannot be read or contains inconsistent data]]>
- </doc>
- </method>
- <method name="read"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="from" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write version file.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="to" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="clearDirectory"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Clear and re-create storage directory.
- <p>
- Removes contents of the current directory and creates an empty directory.
-
- This does not fully format the storage directory.
- It cannot write the version file since it should be written last after
- all other storage type dependent files are written.
- Derived storage is responsible for setting specific storage values and
- writing the version file to disk.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCurrentDir" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getVersionFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPreviousVersionFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPreviousDir" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPreviousTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRemovedTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFinalizedTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getLastCheckpointTmp" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPreviousCheckpoint" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="analyzeStorage" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="startOpt" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Check consistency of the storage directory
-
- @param startOpt a startup option.
-
- @return state {@link StorageState} of the storage directory
- @throws {@link InconsistentFSStateException} if directory state is not
- consistent and cannot be recovered]]>
- </doc>
- </method>
- <method name="doRecover"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="curState" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete or recover storage state from previously failed transition.
-
- @param curState specifies what/how the state should be recovered
- @throws IOException]]>
- </doc>
- </method>
- <method name="lock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Lock storage to provide exclusive access.
-
- <p> Locking is not supported by all file systems.
- E.g., NFS does not consistently support exclusive locks.
-
- <p> If locking is supported, we guarantee exclusive access to the
- storage directory. Otherwise, no guarantee is given.
-
- @throws IOException if locking fails]]>
- </doc>
- </method>
- <method name="unlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Unlock storage.
-
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[One of the storage directories.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory -->
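- <!-- Illustrative usage sketch of the per-directory startup sequence: analyze the state,
- recover a failed transition if needed, lock the directory, then read the VERSION file.
- The handling of NON_EXISTENT and NOT_FORMATTED is an assumption of this sketch; in
- practice those states call for creating or formatting the directory rather than
- recovery.
-
- import java.io.IOException;
- import org.apache.hadoop.hdfs.server.common.HdfsConstants.StartupOption;
- import org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory;
- import org.apache.hadoop.hdfs.server.common.Storage.StorageState;
-
- class StorageDirStartup {
-   static void openDirectory(StorageDirectory sd) throws IOException {
-     StorageState state = sd.analyzeStorage(StartupOption.REGULAR);
-     switch (state) {
-     case NORMAL:
-       break;
-     case NON_EXISTENT:
-     case NOT_FORMATTED:
-       throw new IOException(sd.getRoot() + " is not usable: " + state);
-     default:
-       sd.doRecover(state);   // complete or roll back the previously failed transition
-     }
-     sd.lock();               // exclusive access, where the file system supports it
-     sd.read();               // load the VERSION file
-   }
- }
- -->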
- <!-- start interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
- <interface name="Storage.StorageDirType" abstract="true"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getStorageDirType" return="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isOfType" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirType"/>
- </method>
- <doc>
- <![CDATA[An interface to denote storage directory type.
- Implementations can define a type for a storage directory by implementing
- this interface.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.common.Storage.StorageDirType -->
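- <!-- Illustrative sketch of an implementation: a hypothetical enum that defines
- directory types by implementing Storage.StorageDirType. The type names IMAGE,
- EDITS and IMAGE_AND_EDITS are illustrative only, not the actual server types.
-
- import org.apache.hadoop.hdfs.server.common.Storage.StorageDirType;
-
- enum ExampleDirType implements StorageDirType {
-   IMAGE, EDITS, IMAGE_AND_EDITS;
-
-   public StorageDirType getStorageDirType() {
-     return this;
-   }
-
-   public boolean isOfType(StorageDirType type) {
-     // The combined type counts as either of the single-purpose types.
-     if (this == IMAGE_AND_EDITS && (type == IMAGE || type == EDITS)) {
-       return true;
-     }
-     return this == type;
-   }
- }
- -->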
- <!-- start class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
- <class name="Storage.StorageState" extends="java.lang.Enum"
- abstract="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <method name="values" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState[]"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="valueOf" return="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <field name="NON_EXISTENT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NOT_FORMATTED" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RECOVER_UPGRADE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_FINALIZE" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RECOVER_ROLLBACK" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="COMPLETE_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="RECOVER_CHECKPOINT" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NORMAL" type="org.apache.hadoop.hdfs.server.common.Storage.StorageState"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Storage.StorageState -->
- <!-- start class org.apache.hadoop.hdfs.server.common.StorageInfo -->
- <class name="StorageInfo" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="StorageInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="StorageInfo" type="int, int, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="StorageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getLayoutVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getNamespaceID" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCTime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setStorageInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="from" type="org.apache.hadoop.hdfs.server.common.StorageInfo"/>
- </method>
- <field name="layoutVersion" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="namespaceID" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="cTime" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Common class for storage information.
-
- TODO namespaceID should be long and computed as hash(address + port)]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.StorageInfo -->
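- <!-- Illustrative usage sketch: rendering the VERSION-file fields carried by a
- StorageInfo. The helper name format is hypothetical.
-
- import org.apache.hadoop.hdfs.server.common.StorageInfo;
-
- class StorageInfoUtil {
-   static String format(StorageInfo info) {
-     return "layoutVersion=" + info.getLayoutVersion()
-          + " namespaceID=" + info.getNamespaceID()
-          + " cTime=" + info.getCTime();
-   }
- }
- -->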
- <!-- start interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
- <interface name="Upgradeable" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Comparable"/>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the layout version of the upgrade object.
- @return layout version]]>
- </doc>
- </method>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the type of the software component, which this object is upgrading.
- @return type]]>
- </doc>
- </method>
- <method name="getDescription" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Description of the upgrade object for displaying.
- @return description]]>
- </doc>
- </method>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Upgrade status is the percentage of the total upgrade work that has
- been completed.
-
- 100% means that the upgrade is completed.
- Any value < 100 means it is not complete.
-
- Implementations should report at least 2 distinct values, e.g. 0 and 100.
- @return integer value in the range [0, 100].]]>
- </doc>
- </method>
- <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Prepare for the upgrade.
- E.g. initialize upgrade data structures and set status to 0.
-
- Returns an upgrade command that is used for broadcasting to other cluster
- components.
- E.g. name-node informs data-nodes that they must perform a distributed upgrade.
-
- @return an UpgradeCommand for broadcasting.
- @throws IOException]]>
- </doc>
- </method>
- <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete upgrade.
- E.g. cleanup upgrade data structures or write metadata to disk.
-
- Returns an upgrade command that is used for broadcasting to other cluster
- components.
- E.g. data-nodes inform the name-node that they completed the upgrade
- while other data-nodes are still upgrading.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="details" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get status report for the upgrade.
-
- @param details true if upgradeStatus details need to be included,
- false otherwise
- @return {@link UpgradeStatusReport}
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Common interface for distributed upgrade objects.
-
- Each upgrade object corresponds to a layout version,
- which is the latest version that should be upgraded using this object.
- That is, all components whose layout version is greater than or equal to the
- one returned by {@link #getVersion()} must be upgraded with this object.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.common.Upgradeable -->
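- <!-- Sketch of a class implementing Upgradeable, using only the methods listed
- above. The class name, the layout version and the returned values are
- hypothetical (NodeType.DATA_NODE is assumed to exist); real upgrade objects
- would normally extend UpgradeObject or UpgradeObjectDatanode below, which
- already provide defaults for several of these methods.
-
- import java.io.IOException;
- import org.apache.hadoop.hdfs.server.common.HdfsConstants;
- import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
- import org.apache.hadoop.hdfs.server.common.Upgradeable;
- import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-
- public class ExampleUpgrade implements Upgradeable {
-   private short status = 0;                      // 0..100, see getUpgradeStatus()
-
-   public int getVersion() { return -18; }        // hypothetical layout version
-   public HdfsConstants.NodeType getType() { return HdfsConstants.NodeType.DATA_NODE; }
-   public String getDescription() { return "Example distributed upgrade"; }
-   public short getUpgradeStatus() { return status; }
-
-   public UpgradeCommand startUpgrade() throws IOException {
-     status = 0;                                  // initialize upgrade data structures
-     return null;                                 // or an UpgradeCommand to broadcast
-   }
-
-   public UpgradeCommand completeUpgrade() throws IOException {
-     status = 100;                                // clean up, write metadata to disk
-     return null;
-   }
-
-   public UpgradeStatusReport getUpgradeStatusReport(boolean details) throws IOException {
-     return new UpgradeStatusReport(getVersion(), getUpgradeStatus(), false);
-   }
-
-   public int compareTo(Object o) {               // from java.lang.Comparable
-     return getVersion() - ((Upgradeable) o).getVersion();
-   }
- }
- -->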
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
- <class name="UpgradeManager" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeManager"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getBroadcastCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUpgradeState" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUpgradeVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setUpgradeState"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="uState" type="boolean"/>
- <param name="uVersion" type="int"/>
- </method>
- <method name="getDistributedUpgrades" return="java.util.SortedSet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="initializeUpgrade" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="isUpgradeCompleted" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="startUpgrade" return="boolean"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="completeUpgrade"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="currentUpgrades" type="java.util.SortedSet"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="upgradeState" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="upgradeVersion" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="broadcastCommand" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Generic upgrade manager.
-
- {@link #broadcastCommand} is the command that should be]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeManager -->
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
- <class name="UpgradeObject" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
- <constructor name="UpgradeObject"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDescription" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUpgradeStatusReport" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="details" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="org.apache.hadoop.hdfs.server.common.Upgradeable"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="status" type="short"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Abstract upgrade object.
-
- Contains default implementation of common methods of {@link Upgradeable}
- interface.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObject -->
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
- <class name="UpgradeObjectCollection" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeObjectCollection"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDistributedUpgrades" return="java.util.SortedSet"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="versionFrom" type="int"/>
- <param name="type" type="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Collection of upgrade objects.
- Upgrade objects should be registered here before they can be used.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeObjectCollection -->
- <!-- start class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
- <class name="UpgradeStatusReport" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="UpgradeStatusReport"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="UpgradeStatusReport" type="int, short, boolean"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the layout version of the currently running upgrade.
- @return layout version]]>
- </doc>
- </method>
- <method name="getUpgradeStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the upgradeStatus as a percentage of the total upgrade completed.
-
- @see Upgradeable#getUpgradeStatus()]]>
- </doc>
- </method>
- <method name="isFinalized" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is the current upgrade finalized?
- @return true if finalized, false otherwise.]]>
- </doc>
- </method>
- <method name="getStatusText" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="details" type="boolean"/>
- <doc>
- <![CDATA[Get upgradeStatus data as text for reporting.
- Should be overridden to report upgrade-specific upgradeStatus data.
-
- @param details true if upgradeStatus details need to be included,
- false otherwise
- @return text]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Print basic upgradeStatus details.]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="version" type="int"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="upgradeStatus" type="short"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="finalized" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Base upgrade status class.
- Subclass it if specific status fields need to be reported.
-
- Describes status of current upgrade.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.UpgradeStatusReport -->
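- <!-- A small sketch showing how a report might be built and serialized through the
- Writable methods listed above. The version and status values are made up; only
- the constructor (int, short, boolean), the getters, getStatusText() and
- write()/readFields() from this class are used.
-
- import java.io.ByteArrayInputStream;
- import java.io.ByteArrayOutputStream;
- import java.io.DataInputStream;
- import java.io.DataOutputStream;
- import java.io.IOException;
- import org.apache.hadoop.hdfs.server.common.UpgradeStatusReport;
-
- public class UpgradeStatusReportExample {
-   public static void main(String[] args) throws IOException {
-     // version, upgradeStatus (percent done), finalized
-     UpgradeStatusReport report = new UpgradeStatusReport(-18, (short) 42, false);
-     System.out.println(report.getStatusText(true));
-
-     // Round-trip the report through the Writable interface.
-     ByteArrayOutputStream buf = new ByteArrayOutputStream();
-     report.write(new DataOutputStream(buf));
-
-     UpgradeStatusReport copy = new UpgradeStatusReport();
-     copy.readFields(new DataInputStream(new ByteArrayInputStream(buf.toByteArray())));
-     System.out.println(copy.getVersion() + " " + copy.getUpgradeStatus()
-         + " " + copy.isFinalized());
-   }
- }
- -->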
- <!-- start class org.apache.hadoop.hdfs.server.common.Util -->
- <class name="Util" extends="java.lang.Object"
- abstract="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- <constructor name="Util"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="now" return="long"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Current system time.
- @return current time in msec.]]>
- </doc>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.common.Util -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.datanode">
- <!-- start class org.apache.hadoop.hdfs.server.datanode.DataNode -->
- <class name="DataNode" extends="org.apache.hadoop.conf.Configured"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"/>
- <implements name="org.apache.hadoop.hdfs.protocol.ClientDatanodeProtocol"/>
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="java.lang.Runnable"/>
- <method name="createSocketAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="target" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Use {@link NetUtils#createSocketAddr(String)} instead.]]>
- </doc>
- </method>
- <method name="newSocket" return="java.net.Socket"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Creates either an NIO or a regular socket, depending on socketWriteTimeout.]]>
- </doc>
- </method>
- <method name="getDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the DataNode object]]>
- </doc>
- </method>
- <method name="createInterDataNodeProtocolProxy" return="org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanodeid" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getNameNodeAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getSelfAddr" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getNamenode" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the namenode's identifier]]>
- </doc>
- </method>
- <method name="setNewStorageID"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dnReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Shut down this instance of the datanode.
- Returns only after shutdown is complete.
- This method can only be called by the offerService thread.
- Otherwise, deadlock might occur.]]>
- </doc>
- </method>
- <method name="checkDiskError"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="e" type="java.io.IOException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="checkDiskError"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="offerService"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Main loop for the DataNode. Runs until shutdown,
- forever calling remote NameNode functions.]]>
- </doc>
- </method>
- <method name="notifyNamenodeReceivedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="delHint" type="java.lang.String"/>
- </method>
- <method name="run"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[No matter what kind of exception we get, keep retrying offerService().
- That's the loop that connects to the NameNode and provides basic DataNode
- functionality.
- Only stop when "shouldRun" is turned off (which can only happen at shutdown).]]>
- </doc>
- </method>
- <method name="runDatanodeDaemon"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dn" type="org.apache.hadoop.hdfs.server.datanode.DataNode"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Start a single datanode daemon and wait for it to finish.
- If this thread is specifically interrupted, it will stop waiting.]]>
- </doc>
- </method>
- <method name="instantiateDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Instantiate a single datanode object. This must be run by invoking
- {@link DataNode#runDatanodeDaemon(DataNode)} subsequently.]]>
- </doc>
- </method>
- <method name="createDataNode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Instantiate and start a single datanode daemon and wait for it to finish.
- If this thread is specifically interrupted, it will stop waiting.]]>
- </doc>
- </method>
- <method name="makeInstance" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dataDirs" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Make an instance of DataNode after ensuring that at least one of the
- given data directories (and their parent directories, if necessary)
- can be created.
- @param dataDirs List of directories, where the new DataNode instance should
- keep its files.
- @param conf Configuration instance to use.
- @return DataNode instance for given list of data dirs and conf, or null if
- no directory from this directory list can be created.
- @throws IOException]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="scheduleBlockReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="delay" type="long"/>
- <doc>
- <![CDATA[This method arranges for the data node to send the block report at the next heartbeat.]]>
- </doc>
- </method>
- <method name="getFSDataset" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[This method is used for testing.
- Examples are adding and deleting blocks directly.
- The most common usage will be when the data node's storage is simulated.
-
- @return the fsdataset that stores the blocks]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- </method>
- <method name="getBlockMetaDataInfo" return="org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="recoverBlocks" return="org.apache.hadoop.util.Daemon"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[][]"/>
- </method>
- <method name="updateBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="finalize" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getProtocolVersion" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="protocol" type="java.lang.String"/>
- <param name="clientVersion" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="recoverBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="keepLength" type="boolean"/>
- <param name="targets" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DN_CLIENTTRACE_FORMAT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="namenode" type="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="data" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="dnRegistration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="EMPTY_DEL_HINT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockScanner" type="org.apache.hadoop.hdfs.server.datanode.DataBlockScanner"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockScannerThread" type="org.apache.hadoop.util.Daemon"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="ipcServer" type="org.apache.hadoop.ipc.Server"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="PKT_HEADER_LEN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Header size for a packet]]>
- </doc>
- </field>
- <doc>
- <![CDATA[DataNode is a class (and program) that stores a set of
- blocks for a DFS deployment. A single deployment can
- have one or many DataNodes. Each DataNode communicates
- regularly with a single NameNode. It also communicates
- with client code and other DataNodes from time to time.
- DataNodes store a series of named blocks. The DataNode
- allows client code to read these blocks, or to write new
- block data. The DataNode may also, in response to instructions
- from its NameNode, delete blocks or copy blocks to/from other
- DataNodes.
- The DataNode maintains just one critical table:
- block -> stream of bytes (of BLOCK_SIZE or less)
- This info is stored on a local disk. The DataNode
- reports the table's contents to the NameNode upon startup
- and every so often afterwards.
- DataNodes spend their lives in an endless loop of asking
- the NameNode for something to do. A NameNode cannot connect
- to a DataNode directly; a NameNode simply returns values from
- functions invoked by a DataNode.
- DataNodes maintain an open server socket so that client code
- or other DataNodes can read/write data. The host/port for
- this server is reported to the NameNode, which then sends that
- information to clients or other DataNodes that might be interested.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.DataNode -->
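- <!-- Minimal sketch of starting a datanode programmatically with the static
- methods listed above. Configuration values are assumed to come from the usual
- Hadoop configuration files on the classpath; DataNode.createDataNode(args, conf)
- combines the two steps shown here.
-
- import org.apache.hadoop.conf.Configuration;
- import org.apache.hadoop.hdfs.server.datanode.DataNode;
-
- public class StartDataNodeExample {
-   public static void main(String[] args) throws Exception {
-     Configuration conf = new Configuration();
-
-     // instantiateDataNode() creates the DataNode object; per makeInstance(),
-     // the result can be null if no configured data directory can be created.
-     DataNode dn = DataNode.instantiateDataNode(args, conf);
-     if (dn != null) {
-       // Start the daemon and wait for it to finish.
-       DataNode.runDatanodeDaemon(dn);
-     }
-   }
- }
- -->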
- <!-- start class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
- <class name="DataStorage" extends="org.apache.hadoop.hdfs.server.common.Storage"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataStorage" type="org.apache.hadoop.hdfs.server.common.StorageInfo, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getStorageID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="setFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="isConversionNeeded" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="corruptPreUpgradeStorage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="rootDir" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Data storage information file.
- <p>
- @see Storage]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.DataStorage -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
- <class name="FSDataset" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface"/>
- <constructor name="FSDataset" type="org.apache.hadoop.hdfs.server.datanode.DataStorage, org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[An FSDataset has a directory where it loads its data files.]]>
- </doc>
- </constructor>
- <method name="getMetaFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="findBlockFile" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blockId" type="long"/>
- <doc>
- <![CDATA[Return the block file for the given ID]]>
- </doc>
- </method>
- <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blkid" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="metaFileExists" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getMetaDataLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return the total space used by dfs datanode]]>
- </doc>
- </method>
- <method name="getCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return total capacity, used and unused]]>
- </doc>
- </method>
- <method name="getRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return how many bytes can still be stored in the FSDataset]]>
- </doc>
- </method>
- <method name="getLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Find the block's on-disk length]]>
- </doc>
- </method>
- <method name="getBlockFile" return="java.io.File"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get File name for a given block.]]>
- </doc>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="seekOffset" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blkOffset" type="long"/>
- <param name="ckoff" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns handles to the block file and its metadata file]]>
- </doc>
- </method>
- <method name="detachBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="numLinks" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Make a copy of the block if this block is linked to an existing
- snapshot. This ensures that modifying this block does not modify
- data in any existing snapshots.
- @param block Block
- @param numLinks Detach if the number of links exceeds this value
- @throws IOException
- @return - true if the specified block was detached]]>
- </doc>
- </method>
- <method name="updateBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="writeToBlock" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="isRecovery" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Start writing to a block file.
- If isRecovery is true and the block pre-exists, then we kill all
- other threads that might be writing to this block, and then reopen the file.]]>
- </doc>
- </method>
- <method name="getChannelPosition" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="streams" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Retrieves the offset in the block to which the
- next write will write data.]]>
- </doc>
- </method>
- <method name="setChannelPosition"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="streams" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
- <param name="dataOffset" type="long"/>
- <param name="ckOffset" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Sets the offset in the block to which the
- next write will write data.]]>
- </doc>
- </method>
- <method name="finalizeBlock"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete the block write!]]>
- </doc>
- </method>
- <method name="unfinalizeBlock"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Remove the temporary block file (if any)]]>
- </doc>
- </method>
- <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.Block[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return a table of block data]]>
- </doc>
- </method>
- <method name="isValidBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <doc>
- <![CDATA[Check whether the given block is a valid one.]]>
- </doc>
- </method>
- <method name="validateBlockMetadata"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="invalidate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[We're informed that a block is no longer valid. We
- could lazily garbage-collect the block, but why bother?
- Just get rid of it.]]>
- </doc>
- </method>
- <method name="getFile" return="java.io.File"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <doc>
- <![CDATA[Turn the block identifier into a filename.]]>
- </doc>
- </method>
- <method name="checkDataDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
- <doc>
- <![CDATA[Check if a data directory is healthy
- @throws DiskErrorException]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getStorageInfo" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="METADATA_EXTENSION" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="METADATA_VERSION" type="short"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[FSDataset manages a set of data blocks. Each block
- has a unique name and an extent on disk.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDataset -->
- <!-- start interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
- <interface name="FSDatasetInterface" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean"/>
- <method name="getMetaDataLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the length of the metadata file of the specified block
- @param b - the block for which the metadata length is desired
- @return the length of the metadata file for the specified block.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getMetaDataInputStream" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the metadata of block b as an input stream (and its length)
- @param b - the block
- @return the metadata input stream
- @throws IOException]]>
- </doc>
- </method>
- <method name="metaFileExists" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Does the meta file exist for this block?
- @param b - the block
- @return true if the metafile for the specified block exists
- @throws IOException]]>
- </doc>
- </method>
- <method name="getLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the specified block's on-disk length (excluding metadata)
- @param b
- @return the specified block's on-disk length (excluding metadata)
- @throws IOException]]>
- </doc>
- </method>
- <method name="getStoredBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blkid" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@return the generation stamp stored with the block.]]>
- </doc>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns an input stream to read the contents of the specified block
- @param b
- @return an input stream to read the contents of the specified block
- @throws IOException]]>
- </doc>
- </method>
- <method name="getBlockInputStream" return="java.io.InputStream"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="seekOffset" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns an input stream at specified offset of the specified block
- @param b
- @param seekOffset
- @return an input stream to read the contents of the specified block,
- starting at the offset
- @throws IOException]]>
- </doc>
- </method>
- <method name="getTmpInputStreams" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="blkoff" type="long"/>
- <param name="ckoff" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns an input stream at the specified offset of the specified block.
- The block is still in the tmp directory and is not finalized.
- @param b
- @param blkoff
- @param ckoff
- @return an input stream to read the contents of the specified block,
- starting at the offset
- @throws IOException]]>
- </doc>
- </method>
- <method name="writeToBlock" return="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="isRecovery" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Creates the block and returns output streams to write data and CRC
- @param b
- @param isRecovery True if this is part of error recovery, otherwise false
- @return a BlockWriteStreams object to allow writing the block data
- and CRC
- @throws IOException]]>
- </doc>
- </method>
- <method name="updateBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update the block to the new generation stamp and length.]]>
- </doc>
- </method>
- <method name="finalizeBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Finalizes the block previously opened for writing using writeToBlock.
- The block size is what is in the parameter b and it must match the amount
- of data written
- @param b
- @throws IOException]]>
- </doc>
- </method>
- <method name="unfinalizeBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Unfinalizes the block previously opened for writing using writeToBlock.
- The temporary file associated with this block is deleted.
- @param b
- @throws IOException]]>
- </doc>
- </method>
- <method name="getBlockReport" return="org.apache.hadoop.hdfs.protocol.Block[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the block report - the full list of blocks stored
- @return - the block report - the full list of blocks stored]]>
- </doc>
- </method>
- <method name="isValidBlock" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <doc>
- <![CDATA[Is the block valid?
- @param b
- @return - true if the specified block is valid]]>
- </doc>
- </method>
- <method name="invalidate"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="invalidBlks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Invalidates the specified blocks
- @param invalidBlks - the blocks to be invalidated
- @throws IOException]]>
- </doc>
- </method>
- <method name="checkDataDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="DiskChecker.DiskErrorException" type="org.apache.hadoop.util.DiskChecker.DiskErrorException"/>
- <doc>
- <![CDATA[Check if all the data directories are healthy
- @throws DiskErrorException]]>
- </doc>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Stringifies the name of the storage]]>
- </doc>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Shutdown the FSDataset]]>
- </doc>
- </method>
- <method name="getChannelPosition" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="stream" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the current offset in the data stream.
- @param b
- @param stream The stream to the data file and checksum file
- @return the position of the file pointer in the data stream
- @throws IOException]]>
- </doc>
- </method>
- <method name="setChannelPosition"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="stream" type="org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams"/>
- <param name="dataOffset" type="long"/>
- <param name="ckOffset" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Sets the file pointer of the data stream and checksum stream to
- the specified values.
- @param b
- @param stream The stream for the data file and checksum file
- @param dataOffset The position to which the file pointer for the data stream
- should be set
- @param ckOffset The position to which the file pointer for the checksum stream
- should be set
- @throws IOException]]>
- </doc>
- </method>
- <method name="validateBlockMetadata"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Validate that the contents of the block match
- the file on disk.
- @param b The block to be verified.
- @throws IOException]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This is an interface for the underlying storage that stores blocks for
- a data node.
- Examples are the FSDataset (which stores blocks on dirs) and
- SimulatedFSDataset (which simulates data).]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface -->
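- <!-- Sketch of the block write lifecycle against FSDatasetInterface, using only the
- methods listed above. The 'dataset' would typically come from DataNode#getFSDataset()
- or be a SimulatedFSDataset in tests; the actual byte-writing step is elided because
- the members of BlockWriteStreams are not part of this listing.
-
- import java.io.IOException;
- import java.io.InputStream;
- import org.apache.hadoop.hdfs.protocol.Block;
- import org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface;
-
- public class DatasetWriteLifecycle {
-   static void writeAndRead(FSDatasetInterface dataset, Block b) throws IOException {
-     // 1. Create the block; the returned BlockWriteStreams carries the data and
-     //    checksum output streams.
-     FSDatasetInterface.BlockWriteStreams streams = dataset.writeToBlock(b, false);
-     // ... write the block data and its checksums through 'streams' ...
-
-     // 2. Either finalize the block (its size must match the data written) ...
-     dataset.finalizeBlock(b);
-     // ... or call dataset.unfinalizeBlock(b) instead to discard the temporary file.
-
-     // 3. A finalized block appears in getBlockReport() and can be read back.
-     long length = dataset.getLength(b);
-     InputStream in = dataset.getBlockInputStream(b, 0);
-     in.close();
-   }
- }
- -->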
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
- <class name="FSDatasetInterface.BlockInputStreams" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.io.Closeable"/>
- <method name="close"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class contains the input streams for the data and checksum
- of a block]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockInputStreams -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
- <class name="FSDatasetInterface.BlockWriteStreams" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[This class contains the output streams for the data and checksum
- of a block]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.BlockWriteStreams -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
- <class name="FSDatasetInterface.MetaDataInputStream" extends="java.io.FilterInputStream"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getLength" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This class provides the input stream and length of the metadata
- of a block]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.FSDatasetInterface.MetaDataInputStream -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
- <class name="UpgradeObjectDatanode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Runnable"/>
- <constructor name="UpgradeObjectDatanode"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDatanode" return="org.apache.hadoop.hdfs.server.datanode.DataNode"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </method>
- <method name="doUpgrade"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Specifies how the upgrade is performed.
- @throws IOException]]>
- </doc>
- </method>
- <method name="run"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="completeUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Complete upgrade and return a status complete command for broadcasting.
-
- Data-nodes finish upgrade at different times.
- The data-node needs to re-confirm with the name-node that the upgrade
- is complete while other nodes are still upgrading.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Base class for data-node upgrade objects.
- Data-node upgrades are run in separate threads.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.UpgradeObjectDatanode -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.datanode.metrics">
- <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
- <class name="DataNodeActivityMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DataNodeActivityMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This is the JMX MBean for reporting the DataNode Activity.
 - The MBean is registered using the name
- "hadoop:service=DataNode,name=DataNodeActivity-<storageid>"
-
- Many of the activity metrics are sampled and averaged on an interval
- which can be specified in the metrics config file.
- <p>
- For the metrics that are sampled and averaged, one must specify
- a metrics context that does periodic update calls. Most metrics contexts do.
 - The default Null metrics context, however, does NOT. So if you aren't
 - using any other metrics context, you can turn on the viewing and averaging
 - of sampled metrics by specifying the following two lines
 - in the hadoop-metrics.properties file:
- <pre>
- dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
- dfs.period=10
- </pre>
- <p>
- Note that the metrics are collected regardless of the context used.
 - The context with the update thread is used to average the data periodically.
 - Impl details: We use a dynamic MBean that gets the list of the metrics
 - from the metrics registry passed as an argument to the constructor.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeActivityMBean -->
- <!-- start class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
- <class name="DataNodeMetrics" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.metrics.Updater"/>
- <constructor name="DataNodeMetrics" type="org.apache.hadoop.conf.Configuration, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="doUpdates"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
- <doc>
- <![CDATA[Since this object is a registered updater, this method will be called
- periodically, e.g. every 5 seconds.]]>
- </doc>
- </method>
- <method name="resetAllMinMax"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="bytesWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="bytesRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingLong"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksWritten" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksRead" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksReplicated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksRemoved" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksVerified" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockVerificationFailures" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="readsFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="readsFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="writesFromLocalClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="writesFromRemoteClient" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="readBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="writeBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="readMetadataOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockChecksumOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="copyBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="replaceBlockOp" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="heartbeats" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockReports" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class is for maintaining the various DataNode statistics
- and publishing them through the metrics interfaces.
- This also registers the JMX MBean for RPC.
- <p>
- This class has a number of metrics variables that are publicly accessible;
- these variables (objects) have methods to update their values;
- for example:
- <p> {@link #blocksRead}.inc()]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics -->
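 - <!-- A minimal usage sketch for the public metric fields listed above, assuming
 -      the inc() update methods of the org.apache.hadoop.metrics.util counters in
 -      this release; the storage id string and the sample values are illustrative:
 -
 -      import org.apache.hadoop.conf.Configuration;
 -      import org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeMetrics;
 -
 -      public class DataNodeMetricsExample {
 -        public static void main(String[] args) {
 -          DataNodeMetrics metrics = new DataNodeMetrics(new Configuration(), "example-storage-id");
 -          metrics.blocksRead.inc();        // one more block served to a reader
 -          metrics.bytesRead.inc(65536L);   // bytes streamed while serving it
 -          metrics.readBlockOp.inc(12L);    // latency sample (ms) for a readBlock op
 -          metrics.resetAllMinMax();        // clear the per-interval min/max values
 -          metrics.shutdown();              // unregister the MBean on shutdown
 -        }
 -      }
 - -->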
- <!-- start interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
- <interface name="FSDatasetMBean" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getDfsUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the total space (in bytes) used by dfs datanode
- @return the total space used by dfs datanode
- @throws IOException]]>
- </doc>
- </method>
- <method name="getCapacity" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns total capacity (in bytes) of storage (used and unused)
- @return total capacity of storage (used and unused)
- @throws IOException]]>
- </doc>
- </method>
- <method name="getRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the amount of free storage space (in bytes)
- @return The amount of free storage space
- @throws IOException]]>
- </doc>
- </method>
- <method name="getStorageInfo" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the storage id of the underlying storage]]>
- </doc>
- </method>
- <doc>
 - <![CDATA[This interface defines the methods to get the status of the FSDataset of
 - a data node.
 - It is also used for publishing via JMX (hence we follow the JMX naming
 - convention).
 - Note we have not used the MetricsDynamicMBeanBase to implement this
 - because the interface for the FSDatasetMBean is stable and should
 - be published as an interface.
 -
 - <p>
 - Data Node runtime statistic info is reported in another MBean
- @see org.apache.hadoop.hdfs.server.datanode.metrics.DataNodeStatisticsMBean]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean -->
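 - <!-- A minimal sketch of an FSDatasetMBean implementation based only on the
 -      methods listed above; the class name and the fixed sizes are purely
 -      illustrative, real datasets such as FSDataset derive them from the
 -      underlying volumes:
 -
 -      import java.io.IOException;
 -      import org.apache.hadoop.hdfs.server.datanode.metrics.FSDatasetMBean;
 -
 -      public class FixedSizeDataset implements FSDatasetMBean {
 -        public long getDfsUsed() throws IOException   { return 1L << 30; }  // 1 GB used
 -        public long getCapacity() throws IOException  { return 4L << 30; }  // 4 GB total
 -        public long getRemaining() throws IOException { return getCapacity() - getDfsUsed(); }
 -        public String getStorageInfo() { return "FixedSizeDataset, storageID=example"; }
 -      }
 - -->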
- </package>
- <package name="org.apache.hadoop.hdfs.server.namenode">
- <!-- start class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
- <class name="CheckpointSignature" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.WritableComparable"/>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="compareTo" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"/>
- </method>
- <method name="equals" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="o" type="java.lang.Object"/>
- </method>
- <method name="hashCode" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[A unique signature intended to identify checkpoint transactions.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.CheckpointSignature -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
- <class name="CorruptReplicasMap" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="CorruptReplicasMap"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="addToCorruptReplicasMap"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="dn" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <doc>
- <![CDATA[Mark the block belonging to datanode as corrupt.
- @param blk Block to be added to CorruptReplicasMap
- @param dn DatanodeDescriptor which holds the corrupt replica]]>
- </doc>
- </method>
- <method name="numCorruptReplicas" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- </method>
- <method name="size" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[Stores information about all corrupt blocks in the File System.
- A Block is considered corrupt only if all of its replicas are
- corrupt. While reporting replicas of a Block, we hide any corrupt
 - copies. These copies are removed once the Block is found to have the
 - expected number of good replicas.
- Mapping: Block -> TreeSet<DatanodeDescriptor>]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
- <class name="DatanodeDescriptor" extends="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DatanodeDescriptor"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Default constructor]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
- @param nodeID id of the data node]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param networkLocation location of the data node in network]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param networkLocation location of the data node in network
- @param hostName it could be different from host specified for DatanodeID]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, long, long, long, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param capacity capacity of the data node
- @param dfsUsed space used by the data node
 - @param remaining remaining capacity of the data node
- @param xceiverCount # of data transfers at the data node]]>
- </doc>
- </constructor>
- <constructor name="DatanodeDescriptor" type="org.apache.hadoop.hdfs.protocol.DatanodeID, java.lang.String, java.lang.String, long, long, long, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DatanodeDescriptor constructor
-
- @param nodeID id of the data node
- @param networkLocation location of the data node in network
- @param capacity capacity of the data node, including space used by non-dfs
- @param dfsUsed the used space by dfs datanode
 - @param remaining remaining capacity of the data node
- @param xceiverCount # of data transfers at the data node]]>
- </doc>
- </constructor>
- <method name="numBlocks" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlocksScheduled" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return Approximate number of blocks currently scheduled to be written
- to this datanode.]]>
- </doc>
- </method>
- <field name="isAlive" type="boolean"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeDescriptor tracks stats on a given DataNode,
- such as available storage capacity, last update time, etc.,
- and maintains a set of blocks stored on the datanode.
 - This data structure is internal
 - to the namenode. It is *not* sent over-the-wire to the Client
 - or the Datanodes. Neither is it stored persistently in the
 - fsImage.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
- <class name="DatanodeDescriptor.BlockTargetPair" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <field name="block" type="org.apache.hadoop.hdfs.protocol.Block"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="targets" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor[]"
- transient="false" volatile="false"
- static="false" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Block and targets pair]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor.BlockTargetPair -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
- <class name="FileChecksumServlets" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileChecksumServlets"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[Servlets for file checksum]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
- <class name="FileChecksumServlets.GetServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileChecksumServlets.GetServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Get FileChecksum]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.GetServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
- <class name="FileChecksumServlets.RedirectServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileChecksumServlets.RedirectServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Redirect file checksum queries to an appropriate datanode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileChecksumServlets.RedirectServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
- <class name="FileDataServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FileDataServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="createUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="i" type="org.apache.hadoop.fs.FileStatus"/>
- <param name="ugi" type="org.apache.hadoop.security.UnixUserGroupInformation"/>
- <param name="nnproxy" type="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <exception name="IOException" type="java.io.IOException"/>
- <exception name="URISyntaxException" type="java.net.URISyntaxException"/>
- <doc>
- <![CDATA[Create a redirection URI]]>
- </doc>
- </method>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Service a GET request as described below.
- Request:
- {@code
- GET http://<nn>:<port>/data[/<path>] HTTP/1.1
- }]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Redirect queries about the hosted filesystem to an appropriate datanode.
- @see org.apache.hadoop.hdfs.HftpFileSystem]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FileDataServlet -->
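 - <!-- A minimal client-side sketch for the GET interface described above; the
 -      namenode host, the 50070 info port, and the file path are assumptions,
 -      and the servlet is expected to redirect the request to a datanode that
 -      holds the file's blocks:
 -
 -      import java.io.InputStream;
 -      import java.net.HttpURLConnection;
 -      import java.net.URL;
 -
 -      public class FileDataServletExample {
 -        public static void main(String[] args) throws Exception {
 -          URL url = new URL("http://namenode.example.com:50070/data/user/alice/part-00000");
 -          HttpURLConnection conn = (HttpURLConnection) url.openConnection();
 -          conn.setInstanceFollowRedirects(true);   // follow the redirect to a datanode
 -          try (InputStream in = conn.getInputStream()) {
 -            byte[] buf = new byte[8192];
 -            while (in.read(buf) != -1) { }         // consume the streamed file bytes
 -          }
 -        }
 -      }
 - -->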
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
- <class name="FsckServlet" extends="javax.servlet.http.HttpServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FsckServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[This class is used in Namesystem's jetty to do fsck on namenode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FsckServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
- <class name="FSEditLog" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="open"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create empty edit log files.
- Initialize the output stream for logging.
-
- @throws IOException]]>
- </doc>
- </method>
- <method name="createEditLogFile"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Shutdown the file store.]]>
- </doc>
- </method>
- <method name="logSync"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="logOpenFile"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFileUnderConstruction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Add open lease record to edit log.
- Records the block locations of the last block.]]>
- </doc>
- </method>
- <method name="logCloseFile"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INodeFile"/>
- <doc>
- <![CDATA[Add close lease record to edit log.]]>
- </doc>
- </method>
- <method name="logMkDir"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="newNode" type="org.apache.hadoop.hdfs.server.namenode.INode"/>
- <doc>
- <![CDATA[Add create directory record to edit log]]>
- </doc>
- </method>
- <doc>
- <![CDATA[FSEditLog maintains a log of the namespace modifications.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSEditLog -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSImage -->
- <class name="FSImage" extends="org.apache.hadoop.hdfs.server.common.Storage"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="FSImage" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="FSImage" type="java.io.File"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Represents an Image (image and edit file).]]>
- </doc>
- </constructor>
- <method name="getFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="props" type="java.util.Properties"/>
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Write last checkpoint time and version file into the storage directory.
-
- The version file should always be written last.
 - A missing or corrupted version file indicates that
- the checkpoint is not valid.
-
- @param sd storage directory
- @throws IOException]]>
- </doc>
- </method>
- <method name="getEditLog" return="org.apache.hadoop.hdfs.server.namenode.FSEditLog"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="isConversionNeeded" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="sd" type="org.apache.hadoop.hdfs.server.common.Storage.StorageDirectory"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="saveFSImage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Save the contents of the FS image
- and create empty edits.]]>
- </doc>
- </method>
- <method name="format"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFsEditName" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="corruptPreUpgradeStorage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="rootDir" type="java.io.File"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="checkpointTime" type="long"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </field>
- <field name="removedStorageDirs" type="java.util.List"
- transient="false" volatile="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[list of failed (and thus removed) storages]]>
- </doc>
- </field>
- <doc>
- <![CDATA[FSImage handles checkpointing and logging of the namespace edits.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSImage -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
- <class name="FSNamesystem" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean"/>
- <method name="getNamespaceDirs" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getNamespaceEditsDirs" return="java.util.Collection"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getUpgradePermission" return="org.apache.hadoop.fs.permission.PermissionStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the default path permission when upgrading from releases with no
- permissions (<=0.15) to releases with permissions (>=0.16)]]>
- </doc>
- </method>
- <method name="getFSNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the FSNamesystem object]]>
- </doc>
- </method>
- <method name="close"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Close down this file system manager.
- Causes heartbeat and lease daemons to stop; waits briefly for
 - them to finish, but a short timeout returns control back to the caller.]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permission" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set permissions for an existing file.
- @throws IOException]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="group" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set owner for an existing file.
- @throws IOException]]>
- </doc>
- </method>
- <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="offset" type="long"/>
- <param name="length" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get block locations within the specified range.
- @see ClientProtocol#getBlockLocations(String, long, long)]]>
- </doc>
- </method>
- <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="offset" type="long"/>
- <param name="length" type="long"/>
- <param name="doAccessTime" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get block locations within the specified range.
- @see ClientProtocol#getBlockLocations(String, long, long)]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
 - <![CDATA[Stores the modification and access time for this inode.
 - The access time is precise up to an hour. The transaction, if needed, is
- written to the edits log but is not flushed.]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Set replication for an existing file.
-
- The NameNode sets new replication and schedules either replication of
 - under-replicated data blocks or removal of the excessive block copies
- if the blocks are over-replicated.
-
- @see ClientProtocol#setReplication(String, short)
- @param src file name
- @param replication new replication
- @return true if successful;
- false if file does not exist or is a directory]]>
- </doc>
- </method>
- <method name="getAdditionalBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
 - <![CDATA[The client would like to obtain an additional block for the indicated
 - filename (which is being written to). Return an array that consists
 - of the block, plus a set of machines. The first entry in this list should
 - be where the client writes data. Subsequent items in the list must
 - be provided in the connection to the first datanode.
 - Make sure the previous blocks have been reported by datanodes and
 - are replicated. Will return an empty two-element array if we want the
 - client to "try again later".]]>
- </doc>
- </method>
- <method name="abandonBlock" return="boolean"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client would like to let go of the given block]]>
- </doc>
- </method>
- <method name="completeFile" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem.CompleteFileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="markBlockAsCorrupt"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Mark the block belonging to datanode as corrupt
- @param blk Block to be marked as corrupt
- @param dn Datanode which holds the corrupt replica]]>
- </doc>
- </method>
- <method name="invalidateBlock"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="dn" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Invalidates the given block on the given datanode.]]>
- </doc>
- </method>
- <method name="renameTo" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Change the indicated filename.]]>
- </doc>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
 - <![CDATA[Remove the indicated filename from the namespace. If the filename
 - is a non-empty directory and recursive is set to false, an exception is thrown.]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permissions" type="org.apache.hadoop.fs.permission.PermissionStatus"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create all the necessary directories]]>
- </doc>
- </method>
- <method name="getListing" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a listing of all files at 'src'. The Object[] array
- exists so we can return file attributes (soon to be implemented)]]>
- </doc>
- </method>
- <method name="registerDatanode"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Register Datanode.
- <p>
- The purpose of registration is to identify whether the new datanode
- serves a new data storage, and will report new data block copies,
- which the namenode was not aware of; or the datanode is a replacement
- node for the data storage that was previously served by a different
- or the same (in terms of host:port) datanode.
- The data storages are distinguished by their storageIDs. When a new
- data storage is reported the namenode issues a new unique storageID.
- <p>
- Finally, the namenode returns its namespaceID as the registrationID
- for the datanodes.
- namespaceID is a persistent attribute of the name space.
- The registrationID is checked every time the datanode is communicating
- with the namenode.
- Datanodes with inappropriate registrationID are rejected.
 - If the namenode stops and then restarts, it can restore its
 - namespaceID and will continue serving the datanodes that have previously
 - registered with the namenode without restarting the whole cluster.
-
- @see org.apache.hadoop.hdfs.server.datanode.DataNode#register()]]>
- </doc>
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get registrationID for datanodes based on the namespaceID.
-
- @see #registerDatanode(DatanodeRegistration)
- @see FSImage#newNamespaceID()
- @return registration ID]]>
- </doc>
- </method>
- <method name="computeDatanodeWork" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Compute block replication and block invalidation work
- that can be scheduled on data-nodes.
- The datanode will be informed of this work at the next heartbeat.
-
- @return number of blocks scheduled for replication or removal.]]>
- </doc>
- </method>
- <method name="setNodeReplicationLimit"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="limit" type="int"/>
- </method>
- <method name="removeDatanode"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[remove a datanode descriptor
- @param nodeID datanode ID]]>
- </doc>
- </method>
- <method name="processReport"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <param name="newReport" type="org.apache.hadoop.hdfs.protocol.BlockListAsLongs"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The given node is reporting all its blocks. Use this info to
- update the (machine-->blocklist) and (block-->machinelist) tables.]]>
- </doc>
- </method>
- <method name="blockReceived"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="delHint" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The given node is reporting that it received a certain block.]]>
- </doc>
- </method>
- <method name="getMissingBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCapacityTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total raw bytes including non-dfs used space.]]>
- </doc>
- </method>
- <method name="getCapacityUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total used space by data nodes]]>
- </doc>
- </method>
- <method name="getCapacityUsedPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total used space by data nodes as percentage of total capacity]]>
- </doc>
- </method>
- <method name="getCapacityUsedNonDFS" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
 - <![CDATA[Total used space by data nodes for non-DFS purposes such
 - as storing temporary files on the local file system.]]>
- </doc>
- </method>
- <method name="getCapacityRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total non-used raw bytes.]]>
- </doc>
- </method>
- <method name="getCapacityRemainingPercent" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total remaining space by data nodes as percentage of total capacity]]>
- </doc>
- </method>
- <method name="getTotalLoad" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total number of connections.]]>
- </doc>
- </method>
- <method name="datanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="AccessControlException" type="org.apache.hadoop.security.AccessControlException"/>
- </method>
- <method name="DFSNodesStatus"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="live" type="java.util.ArrayList"/>
- <param name="dead" type="java.util.ArrayList"/>
- </method>
- <method name="stopDecommission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="node" type="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Stop decommissioning the specified datanodes.]]>
- </doc>
- </method>
- <method name="getDataNodeInfo" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getDFSNameNodeAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="use {@link NameNode#getNameNodeAddress()} instead.">
- <doc>
- <![CDATA[@deprecated use {@link NameNode#getNameNodeAddress()} instead.]]>
- </doc>
- </method>
- <method name="getStartTime" return="java.util.Date"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rereads the config to get hosts and exclude list file names.
- Rereads the files to update the hosts and exclude lists. It
- checks if any of the hosts have changed states:
- 1. Added to hosts --> no further work needed here.
- 2. Removed from hosts --> mark AdminState as decommissioned.
- 3. Added to exclude --> start decommission.
- 4. Removed from exclude --> stop decommission.]]>
- </doc>
- </method>
- <method name="getDatanode" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeID" type="org.apache.hadoop.hdfs.protocol.DatanodeID"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get data node by storage ID.
-
- @param nodeID
- @return DatanodeDescriptor or null if the node is not found.
- @throws IOException]]>
- </doc>
- </method>
- <method name="randomDataNode" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRandomDatanode" return="org.apache.hadoop.hdfs.server.namenode.DatanodeDescriptor"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getBlocksTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Get the total number of blocks in the system.]]>
- </doc>
- </method>
- <method name="getFilesTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getPendingReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUnderReplicatedBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCorruptReplicaBlocksCount" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns number of blocks with corrupt replicas]]>
- </doc>
- </method>
- <method name="getScheduledReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFSState" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFSNamesystemMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get FSNamesystemMetrics]]>
- </doc>
- </method>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[shutdown FSNamesystem]]>
- </doc>
- </method>
- <method name="numLiveDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of live data nodes
- @return Number of live data nodes]]>
- </doc>
- </method>
- <method name="numDeadDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of dead data nodes
- @return Number of dead data nodes]]>
- </doc>
- </method>
- <method name="setGenerationStamp"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="stamp" type="long"/>
- <doc>
- <![CDATA[Sets the generation stamp for this filesystem]]>
- </doc>
- </method>
- <method name="getGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Gets the generation stamp for this filesystem]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="AUDIT_FORMAT" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="auditLog" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="dir" type="org.apache.hadoop.hdfs.server.namenode.FSDirectory"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="corruptReplicas" type="org.apache.hadoop.hdfs.server.namenode.CorruptReplicasMap"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="leaseManager" type="org.apache.hadoop.hdfs.server.namenode.LeaseManager"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="lmthread" type="org.apache.hadoop.util.Daemon"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="replthread" type="org.apache.hadoop.util.Daemon"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="fsNamesystemObject" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- transient="false" volatile="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[FSNamesystem does the actual bookkeeping work for the
- DataNode.
- It tracks several important tables.
- 1) valid fsname --> blocklist (kept on disk, logged)
- 2) Set of all valid blocks (inverted #1)
- 3) block --> machinelist (kept in memory, rebuilt dynamically from reports)
- 4) machine --> blocklist (inverted #2)
- 5) LRU cache of updated-heartbeat machines]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.FSNamesystem -->
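The FSNamesystem counters listed above (total blocks and files, under-replicated and corrupt blocks, live/dead datanodes) are the figures surfaced by the namenode web UI and metrics. A minimal monitoring sketch, assuming a NameNode instance that is already running in the same JVM (obtained as in the NameNode class later in this file); the class and method names introduced here are illustrative only:

```java
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

// Hypothetical helper: read a few FSNamesystem counters from an
// in-process NameNode and print them. All getters used here are the
// public methods documented in the XML above.
public class NamesystemStats {
  public static void print(NameNode nn) {
    FSNamesystem fsn = nn.getNamesystem();
    System.out.println("blocks total:      " + fsn.getBlocksTotal());
    System.out.println("files total:       " + fsn.getFilesTotal());
    System.out.println("under-replicated:  " + fsn.getUnderReplicatedBlocks());
    System.out.println("corrupt replicas:  " + fsn.getCorruptReplicaBlocksCount());
    System.out.println("live datanodes:    " + fsn.numLiveDataNodes());
    System.out.println("dead datanodes:    " + fsn.numDeadDataNodes());
    System.out.println("fs state:          " + fsn.getFSState());
  }
}
```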
- <!-- start class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
- <class name="GetImageServlet" extends="javax.servlet.http.HttpServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="GetImageServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[This class is used in Namesystem's jetty to retrieve a file.
- Typically used by the Secondary NameNode to retrieve image and
- edit file for periodic checkpointing.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.GetImageServlet -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.JspHelper -->
- <class name="JspHelper" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="JspHelper"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="randomNode" return="org.apache.hadoop.hdfs.protocol.DatanodeID"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="bestNode" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blk" type="org.apache.hadoop.hdfs.protocol.LocatedBlock"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="streamBlockInAscii"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="addr" type="java.net.InetSocketAddress"/>
- <param name="blockId" type="long"/>
- <param name="genStamp" type="long"/>
- <param name="blockSize" type="long"/>
- <param name="offsetIntoBlock" type="long"/>
- <param name="chunkSizeToView" type="long"/>
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="DFSNodesStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="live" type="java.util.ArrayList"/>
- <param name="dead" type="java.util.ArrayList"/>
- </method>
- <method name="addTableHeader"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableRow"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="columns" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableRow"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="columns" type="java.lang.String[]"/>
- <param name="row" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="addTableFooter"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getSafeModeText" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getWarningText" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="fsn" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"/>
- </method>
- <method name="getInodeLimitText" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getUpgradeStatusText" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="sortNodeList"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodes" type="java.util.ArrayList"/>
- <param name="field" type="java.lang.String"/>
- <param name="order" type="java.lang.String"/>
- </method>
- <method name="printPathWithLinks"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="dir" type="java.lang.String"/>
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="namenodeInfoPort" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="printGotoForm"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="namenodeInfoPort" type="int"/>
- <param name="file" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createTitle"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="javax.servlet.jsp.JspWriter"/>
- <param name="req" type="javax.servlet.http.HttpServletRequest"/>
- <param name="file" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="WEB_UGI_PROPERTY_NAME" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="nameNodeAddr" type="java.net.InetSocketAddress"
- transient="false" volatile="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="conf" type="org.apache.hadoop.conf.Configuration"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="webUGI" type="org.apache.hadoop.security.UnixUserGroupInformation"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="defaultChunkSizeToView" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.JspHelper -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
- <class name="LeaseExpiredException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="LeaseExpiredException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The lease that was being used to create this file has expired.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseExpiredException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
- <class name="LeaseManager" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getLeaseByPath" return="org.apache.hadoop.hdfs.server.namenode.LeaseManager.Lease"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <doc>
- <![CDATA[@return the lease containing src]]>
- </doc>
- </method>
- <method name="countLease" return="int"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[@return the number of leases currently in the system]]>
- </doc>
- </method>
- <method name="setLeasePeriod"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="softLimit" type="long"/>
- <param name="hardLimit" type="long"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[LeaseManager does the lease housekeeping for writing on files.
- This class also provides useful static methods for lease recovery.
-
- Lease Recovery Algorithm
- 1) Namenode retrieves lease information
- 2) For each file f in the lease, consider the last block b of f
- 2.1) Get the datanodes which contain b
- 2.2) Assign one of the datanodes as the primary datanode p
- 2.3) p obtains a new generation stamp from the namenode
- 2.4) p gets the block info from each datanode
- 2.5) p computes the minimum block length
- 2.6) p updates the datanodes, which have a valid generation stamp,
- with the new generation stamp and the minimum block length
- 2.7) p reports the update results to the namenode
- 2.8) Namenode updates the BlockInfo
- 2.9) Namenode removes f from the lease
- and removes the lease once all files have been removed
- 2.10) Namenode commits the changes to the edit log]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.LeaseManager -->
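The soft and hard lease limits set via setLeasePeriod govern when the recovery algorithm above kicks in. A small sketch, assuming the caller already holds the FSNamesystem reference whose public leaseManager field is documented earlier in this file; the class name and the millisecond values are illustrative, not part of the API:

```java
import org.apache.hadoop.hdfs.server.namenode.FSNamesystem;

// Illustrative test helper: shorten the lease limits so that lease expiry
// and recovery can be exercised quickly, then report how many leases are
// currently outstanding. The limits chosen here are arbitrary.
public class LeaseTestHelper {
  public static int shortenLeases(FSNamesystem fsn) {
    long softLimit = 1000L;   // 1 second soft limit (illustrative)
    long hardLimit = 5000L;   // 5 second hard limit (illustrative)
    fsn.leaseManager.setLeasePeriod(softLimit, hardLimit);
    return fsn.leaseManager.countLease();
  }
}
```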
- <!-- start class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
- <class name="ListPathsServlet" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="ListPathsServlet"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="buildRoot" return="java.util.Map"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="doc" type="org.znerd.xmlenc.XMLOutputter"/>
- <doc>
- <![CDATA[Build a map from the query string, setting values and defaults.]]>
- </doc>
- </method>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Service a GET request as described below.
- Request:
- {@code
- GET http://<nn>:<port>/listPaths[/<path>][<?option>[&option]*] HTTP/1.1
- }
- Where <i>option</i> (default) is one of:
- recursive ("no")
- filter (".*")
- exclude ("\..*\.crc")
- Response: A flat list of files/directories in the following format:
- {@code
- <listing path="..." recursive="(yes|no)" filter="..."
- time="yyyy-MM-dd hh:mm:ss UTC" version="...">
- <directory path="..." modified="yyyy-MM-dd hh:mm:ss"/>
- <file path="..." modified="yyyy-MM-dd'T'hh:mm:ssZ" accesstime="yyyy-MM-dd'T'hh:mm:ssZ"
- blocksize="..."
- replication="..." size="..."/>
- </listing>
- }]]>
- </doc>
- </method>
- <field name="df" type="java.text.SimpleDateFormat"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Obtain meta-information about a filesystem.
- @see org.apache.hadoop.hdfs.HftpFileSystem]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.ListPathsServlet -->
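The GET request format described above can be exercised from any HTTP client. A hedged sketch that fetches the XML listing for a directory and prints it; the host, port, and class name are placeholders chosen for illustration, not values defined by the API:

```java
import java.io.BufferedReader;
import java.io.IOException;
import java.io.InputStreamReader;
import java.net.URL;

// Illustrative client for the listPaths servlet described above.
// nnHost/nnPort stand in for the namenode's HTTP address.
public class ListPathsClient {
  public static void dumpListing(String nnHost, int nnPort, String path) throws IOException {
    // e.g. http://<nnHost>:<nnPort>/listPaths/user/alice?recursive=yes
    URL url = new URL("http://" + nnHost + ":" + nnPort + "/listPaths" + path + "?recursive=yes");
    try (BufferedReader in = new BufferedReader(new InputStreamReader(url.openStream()))) {
      String line;
      while ((line = in.readLine()) != null) {
        System.out.println(line);   // flat <listing>/<directory>/<file> XML elements
      }
    }
  }
}
```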
- <!-- start class org.apache.hadoop.hdfs.server.namenode.NameNode -->
- <class name="NameNode" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.hdfs.protocol.ClientProtocol"/>
- <implements name="org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol"/>
- <implements name="org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol"/>
- <implements name="org.apache.hadoop.hdfs.protocol.FSConstants"/>
- <implements name="org.apache.hadoop.security.authorize.RefreshAuthorizationPolicyProtocol"/>
- <constructor name="NameNode" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Start NameNode.
- <p>
- The name-node can be started with one of the following startup options:
- <ul>
- <li>{@link StartupOption#REGULAR REGULAR} - normal name node startup</li>
- <li>{@link StartupOption#FORMAT FORMAT} - format name node</li>
- <li>{@link StartupOption#UPGRADE UPGRADE} - start the cluster
- upgrade and create a snapshot of the current file system state</li>
- <li>{@link StartupOption#ROLLBACK ROLLBACK} - roll the
- cluster back to the previous state</li>
- </ul>
- The option is passed via configuration field:
- <tt>dfs.namenode.startup</tt>
-
- The conf will be modified to reflect the actual ports on which
- the NameNode is up and running if the user passes the port as
- <code>zero</code> in the conf.
-
- @param conf configuration
- @throws IOException]]>
- </doc>
- </constructor>
- <method name="getProtocolVersion" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="protocol" type="java.lang.String"/>
- <param name="clientVersion" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="format"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Format a new filesystem. Destroys any filesystem that may already
- exist at this location.]]>
- </doc>
- </method>
- <method name="getNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getNameNodeMetrics" return="org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="address" type="java.lang.String"/>
- </method>
- <method name="getAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- </method>
- <method name="getUri" return="java.net.URI"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="namenode" type="java.net.InetSocketAddress"/>
- </method>
- <method name="join"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Wait for service to finish.
- (Normally, it runs forever.)]]>
- </doc>
- </method>
- <method name="stop"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Stop all NameNode threads and wait for all to finish.]]>
- </doc>
- </method>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="size" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Return a list of blocks & their locations on <code>datanode</code> whose
- total size is <code>size</code>
-
- @param datanode on which blocks are located
- @param size total size of blocks]]>
- </doc>
- </method>
- <method name="getBlockLocations" return="org.apache.hadoop.hdfs.protocol.LocatedBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="offset" type="long"/>
- <param name="length" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="create"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <param name="clientName" type="java.lang.String"/>
- <param name="overwrite" type="boolean"/>
- <param name="replication" type="short"/>
- <param name="blockSize" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="append" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setReplication" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="replication" type="short"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setPermission"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="permissions" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setOwner"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="username" type="java.lang.String"/>
- <param name="groupname" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="addBlock" return="org.apache.hadoop.hdfs.protocol.LocatedBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="abandonBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="b" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="src" type="java.lang.String"/>
- <param name="holder" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client needs to give up on the block.]]>
- </doc>
- </method>
- <method name="complete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[The client has detected an error on the specified located blocks
- and is reporting them to the server. For now, the namenode will
- mark the block as corrupt. In the future we might
- check the blocks are actually corrupt.]]>
- </doc>
- </method>
- <method name="nextGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="commitBlockSynchronization"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newgenerationstamp" type="long"/>
- <param name="newlength" type="long"/>
- <param name="closeFile" type="boolean"/>
- <param name="deleteblock" type="boolean"/>
- <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getPreferredBlockSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="rename" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="dst" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="delete" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="recursive" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="mkdirs" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="masked" type="org.apache.hadoop.fs.permission.FsPermission"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="renewLease"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getListing" return="org.apache.hadoop.fs.FileStatus[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFileInfo" return="org.apache.hadoop.fs.FileStatus"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the file info for a specific file.
- @param src The string representation of the path to the file
- @throws IOException if permission to access file is denied by the system
- @return object containing information regarding the file
- or null if file not found]]>
- </doc>
- </method>
- <method name="getStats" return="long[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="getDatanodeReport" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="type" type="org.apache.hadoop.hdfs.protocol.FSConstants.DatanodeReportType"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="setSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.SafeModeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="isInSafeMode" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Is the cluster currently in safe mode?]]>
- </doc>
- </method>
- <method name="saveNamespace"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="refreshNodes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the list of datanodes that the namenode should allow to
- connect. Re-reads the conf by creating a new Configuration object and
- uses the file list in the configuration to update the list.
- </doc>
- </method>
- <method name="getEditLogSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the size of the current edit log.]]>
- </doc>
- </method>
- <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Roll the edit log.]]>
- </doc>
- </method>
- <method name="rollFsImage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Roll the image]]>
- </doc>
- </method>
- <method name="finalizeUpgrade"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="distributedUpgradeProgress" return="org.apache.hadoop.hdfs.server.common.UpgradeStatusReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="action" type="org.apache.hadoop.hdfs.protocol.FSConstants.UpgradeAction"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="metaSave"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="filename" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps namenode state into specified file]]>
- </doc>
- </method>
- <method name="getContentSummary" return="org.apache.hadoop.fs.ContentSummary"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setQuota"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="path" type="java.lang.String"/>
- <param name="namespaceQuota" type="long"/>
- <param name="diskspaceQuota" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="fsync"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="clientName" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="setTimes"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="src" type="java.lang.String"/>
- <param name="mtime" type="long"/>
- <param name="atime" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="register" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="capacity" type="long"/>
- <param name="dfsUsed" type="long"/>
- <param name="remaining" type="long"/>
- <param name="xmitsInProgress" type="int"/>
- <param name="xceiverCount" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Data node notifies the name node that it is alive.
- Returns an array of block-oriented commands for the datanode to execute.
- This will be either a transfer or a delete operation.]]>
- </doc>
- </method>
- <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="long[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="blockReceived"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <param name="delHints" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="errorReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="errorCode" type="int"/>
- <param name="msg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="verifyRequest"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="nodeReg" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Verify request.
-
- Verifies the correctness of the datanode version and registration ID, and
- checks that the datanode does not need to be shut down.
-
- @param nodeReg data node registration
- @throws IOException]]>
- </doc>
- </method>
- <method name="verifyVersion"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="version" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Verify version.
-
- @param version
- @throws IOException]]>
- </doc>
- </method>
- <method name="getFsImageName" return="java.io.File"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the name of the fsImage file]]>
- </doc>
- </method>
- <method name="getFSImage" return="org.apache.hadoop.hdfs.server.namenode.FSImage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getFsImageNameCheckpoint" return="java.io.File[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Returns the name of the fsImage file uploaded by periodic
- checkpointing]]>
- </doc>
- </method>
- <method name="getNameNodeAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the address on which the NameNode is listening.
- @return the address on which the NameNode is listening.]]>
- </doc>
- </method>
- <method name="getHttpAddress" return="java.net.InetSocketAddress"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Returns the address of the NameNode's http server,
- which is used to access the name-node web UI.
-
- @return the http address.]]>
- </doc>
- </method>
- <method name="refreshServiceAcl"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="createNameNode" return="org.apache.hadoop.hdfs.server.namenode.NameNode"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="conf" type="org.apache.hadoop.conf.Configuration"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- </method>
- <field name="DEFAULT_PORT" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="stateChangeLog" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="namesystem" type="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[NameNode serves as both directory namespace manager and
- "inode table" for the Hadoop DFS. There is a single NameNode
- running in any DFS deployment. (Well, except when there
- is a second backup/failover NameNode.)
- The NameNode controls two critical tables:
- 1) filename->blocksequence (namespace)
- 2) block->machinelist ("inodes")
- The first table is stored on disk and is very precious.
- The second table is rebuilt every time the NameNode comes
- up.
- 'NameNode' refers to both this class as well as the 'NameNode server'.
- The 'FSNamesystem' class actually performs most of the filesystem
- management. The majority of the 'NameNode' class itself is concerned
- with exposing the IPC interface and the http server to the outside world,
- plus some configuration management.
- NameNode implements the ClientProtocol interface, which allows
- clients to ask for DFS services. ClientProtocol is not
- designed for direct use by authors of DFS client code. End-users
- should instead use the org.apache.hadoop.fs.FileSystem class.
- NameNode also implements the DatanodeProtocol interface, used by
- DataNode programs that actually store DFS data blocks. These
- methods are invoked repeatedly and automatically by all the
- DataNodes in a DFS deployment.
- NameNode also implements the NamenodeProtocol interface, used by
- secondary namenodes or rebalancing processes to get partial namenode
- state, for example a partial blocksMap.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.NameNode -->
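Pulling the startup notes above together, a minimal sketch of formatting a fresh filesystem and bringing up a namenode programmatically. It assumes the relevant dfs.* settings (name directories, RPC and HTTP addresses) are already present in the loaded Configuration; the class name is illustrative:

```java
import org.apache.hadoop.conf.Configuration;
import org.apache.hadoop.hdfs.server.namenode.NameNode;

// Sketch only: format a brand-new filesystem and start a NameNode on it.
// Assumes the name directories and addresses are set in the configuration;
// format() destroys any filesystem already at that location.
public class StartNameNode {
  public static void main(String[] args) throws Exception {
    Configuration conf = new Configuration();
    NameNode.format(conf);             // only for a brand-new deployment
    NameNode namenode = new NameNode(conf);
    System.out.println("NameNode RPC at " + namenode.getNameNodeAddress()
        + ", web UI at " + namenode.getHttpAddress());
    namenode.join();                   // normally runs until stopped
  }
}
```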
- <!-- start class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
- <class name="NamenodeFsck" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NamenodeFsck" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.namenode.NameNode, java.util.Map, javax.servlet.http.HttpServletResponse"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Filesystem checker.
- @param conf configuration (namenode config)
- @param nn namenode that this fsck is going to use
- @param pmap key=value[] map that is passed to the http servlet as url parameters
- @param response the object into which this servlet writes the url contents
- @throws IOException]]>
- </doc>
- </constructor>
- <method name="fsck"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Check files on DFS, starting from the indicated path.
- @throws Exception]]>
- </doc>
- </method>
- <method name="run" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[@param args]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="CORRUPT_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="HEALTHY_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="NONEXISTENT_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FAILURE_STATUS" type="java.lang.String"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FIXING_NONE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Don't attempt any fixing.]]>
- </doc>
- </field>
- <field name="FIXING_MOVE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Move corrupted files to /lost+found.]]>
- </doc>
- </field>
- <field name="FIXING_DELETE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Delete corrupted files.]]>
- </doc>
- </field>
- <doc>
- <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
- sub-optimal conditions.
- <p>The tool scans all files and directories, starting from an indicated
- root path. The following abnormal conditions are detected and handled:</p>
- <ul>
- <li>files with blocks that are completely missing from all datanodes.<br/>
- In this case the tool can perform one of the following actions:
- <ul>
- <li>none ({@link #FIXING_NONE})</li>
- <li>move corrupted files to /lost+found directory on DFS
- ({@link #FIXING_MOVE}). Remaining data blocks are saved as
- block chains, representing the longest consecutive series of valid blocks.</li>
- <li>delete corrupted files ({@link #FIXING_DELETE})</li>
- </ul>
- </li>
- <li>detect files with under-replicated or over-replicated blocks</li>
- </ul>
- Additionally, the tool collects detailed overall DFS statistics, and
- optionally can print detailed statistics on block locations and replication
- factors of each file.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.FsckResult -->
- <class name="NamenodeFsck.FsckResult" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NamenodeFsck.FsckResult"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="isHealthy" return="boolean"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[DFS is considered healthy if there are no missing blocks.]]>
- </doc>
- </method>
- <method name="addMissing"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="id" type="java.lang.String"/>
- <param name="size" type="long"/>
- <doc>
- <![CDATA[Add a missing block name, plus its size.]]>
- </doc>
- </method>
- <method name="getMissingIds" return="java.util.ArrayList"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return a list of missing block names (as list of Strings).]]>
- </doc>
- </method>
- <method name="getMissingSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return total size of missing data, in bytes.]]>
- </doc>
- </method>
- <method name="setMissingSize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="missingSize" type="long"/>
- </method>
- <method name="getExcessiveReplicas" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the number of over-replicated blocks.]]>
- </doc>
- </method>
- <method name="setExcessiveReplicas"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="overReplicatedBlocks" type="long"/>
- </method>
- <method name="getReplicationFactor" return="float"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the actual replication factor.]]>
- </doc>
- </method>
- <method name="getMissingReplicas" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the number of under-replicated blocks. Note: missing blocks are not counted here.]]>
- </doc>
- </method>
- <method name="setMissingReplicas"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="underReplicatedBlocks" type="long"/>
- </method>
- <method name="getTotalDirs" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return total number of directories encountered during this scan.]]>
- </doc>
- </method>
- <method name="setTotalDirs"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="totalDirs" type="long"/>
- </method>
- <method name="getTotalFiles" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return total number of files encountered during this scan.]]>
- </doc>
- </method>
- <method name="setTotalFiles"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="totalFiles" type="long"/>
- </method>
- <method name="getTotalOpenFiles" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return total number of files opened for write encountered during this scan.]]>
- </doc>
- </method>
- <method name="setTotalOpenFiles"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="totalOpenFiles" type="long"/>
- <doc>
- <![CDATA[Set total number of open files encountered during this scan.]]>
- </doc>
- </method>
- <method name="getTotalSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return total size of scanned data, in bytes.]]>
- </doc>
- </method>
- <method name="setTotalSize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="totalSize" type="long"/>
- </method>
- <method name="getTotalOpenFilesSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return total size of open files data, in bytes.]]>
- </doc>
- </method>
- <method name="setTotalOpenFilesSize"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="totalOpenFilesSize" type="long"/>
- </method>
- <method name="getReplication" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the intended replication factor, against which the over/under-
- replicated blocks are counted. Note: this value comes from the current
- Configuration supplied for the tool, so it may be different from the
- value in DFS Configuration.]]>
- </doc>
- </method>
- <method name="setReplication"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="replication" type="int"/>
- </method>
- <method name="getTotalBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the total number of blocks in the scanned area.]]>
- </doc>
- </method>
- <method name="setTotalBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="totalBlocks" type="long"/>
- </method>
- <method name="getTotalOpenFilesBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the total number of blocks held by open files.]]>
- </doc>
- </method>
- <method name="setTotalOpenFilesBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="totalOpenFilesBlocks" type="long"/>
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCorruptFiles" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Return the number of corrupted files.]]>
- </doc>
- </method>
- <method name="setCorruptFiles"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="corruptFiles" type="long"/>
- </method>
- <doc>
- <![CDATA[FsckResult of checking, plus overall DFS statistics.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.NamenodeFsck.FsckResult -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
- <class name="NotReplicatedYetException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NotReplicatedYetException" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[The file has not finished being written to enough datanodes yet.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.NotReplicatedYetException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
- <class name="SafeModeException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="SafeModeException" type="java.lang.String, org.apache.hadoop.hdfs.server.namenode.FSNamesystem.SafeModeInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when the name node is in safe mode.
- Clients cannot modify the namespace until safe mode is off.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.SafeModeException -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
- <class name="SecondaryNameNode" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="java.lang.Runnable"/>
- <constructor name="SecondaryNameNode" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Create a connection to the primary namenode.]]>
- </doc>
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Shut down this instance of the secondary name node.
- Returns only after shutdown is complete.]]>
- </doc>
- </method>
- <method name="run"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[main() has some simple utility methods.
- @param argv Command line parameters.
- @exception Exception if the filesystem does not exist.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[The Secondary NameNode is a helper to the primary NameNode.
- The Secondary is responsible for supporting periodic checkpoints
- of the HDFS metadata. The current design allows only one Secondary
- NameNode per HDFS cluster.
- The Secondary NameNode is a daemon that periodically wakes
- up (determined by the schedule specified in the configuration),
- triggers a periodic checkpoint and then goes back to sleep.
- The Secondary NameNode uses the ClientProtocol to talk to the
- primary NameNode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode -->
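- <!-- A minimal sketch (not part of the generated API listing) of running the SecondaryNameNode
-      described above in-process. Only members listed in this file are used: the
-      Configuration-based constructor, Runnable.run() via a Thread, and shutdown(); running it
-      in a plain Thread rather than via main() is an assumption made for illustration.
-
-        import org.apache.hadoop.conf.Configuration;
-        import org.apache.hadoop.hdfs.server.namenode.SecondaryNameNode;
-
-        public class SecondaryNameNodeRunner {
-          public static void main(String[] args) throws Exception {
-            Configuration conf = new Configuration();
-            // Creates a connection to the primary namenode (see the constructor doc above).
-            SecondaryNameNode secondary = new SecondaryNameNode(conf);
-            Thread checkpointer = new Thread(secondary);  // SecondaryNameNode implements Runnable
-            checkpointer.start();                         // wakes up on its schedule and checkpoints
-            Thread.sleep(60L * 60L * 1000L);              // let it run for an hour (placeholder)
-            secondary.shutdown();                         // returns only after shutdown is complete
-          }
-        }
- -->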
- <!-- start class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
- <class name="StreamFile" extends="org.apache.hadoop.hdfs.server.namenode.DfsServlet"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="StreamFile"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getDFSClient" return="org.apache.hadoop.hdfs.DFSClient"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a DFSClient for connecting to DFS.]]>
- </doc>
- </method>
- <method name="doGet"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="request" type="javax.servlet.http.HttpServletRequest"/>
- <param name="response" type="javax.servlet.http.HttpServletResponse"/>
- <exception name="ServletException" type="javax.servlet.ServletException"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.StreamFile -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
- <class name="UpgradeObjectNamenode" extends="org.apache.hadoop.hdfs.server.common.UpgradeObject"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeObjectNamenode"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="true" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="command" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Process an upgrade command.
- RPC has only one very generic command for all upgrade-related inter-component
- communications.
- The actual command recognition and execution should be handled here.
- The reply is also sent back as an UpgradeCommand.
-
- @param command
- @return the reply command which is analyzed on the client side.]]>
- </doc>
- </method>
- <method name="getType" return="org.apache.hadoop.hdfs.server.common.HdfsConstants.NodeType"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="startUpgrade" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="getFSNamesystem" return="org.apache.hadoop.hdfs.server.namenode.FSNamesystem"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </method>
- <method name="forceProceed"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[Base class for name-node upgrade objects.
- Data-node upgrades are run in separate threads.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.UpgradeObjectNamenode -->
- </package>
- <package name="org.apache.hadoop.hdfs.server.namenode.metrics">
- <!-- start interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
- <interface name="FSNamesystemMBean" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <method name="getFSState" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[The state of the file system: Safemode or Operational
- @return the state]]>
- </doc>
- </method>
- <method name="getBlocksTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of allocated blocks in the system
- @return - number of allocated blocks]]>
- </doc>
- </method>
- <method name="getCapacityTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total storage capacity
- @return - total capacity in bytes]]>
- </doc>
- </method>
- <method name="getCapacityRemaining" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Free (unused) storage capacity
- @return - free capacity in bytes]]>
- </doc>
- </method>
- <method name="getCapacityUsed" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Used storage capacity
- @return - used capacity in bytes]]>
- </doc>
- </method>
- <method name="getFilesTotal" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total number of files and directories
- @return - num of files and directories]]>
- </doc>
- </method>
- <method name="getPendingReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Blocks pending to be replicated
- @return - num of blocks to be replicated]]>
- </doc>
- </method>
- <method name="getUnderReplicatedBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Blocks under replicated
- @return - num of blocks under replicated]]>
- </doc>
- </method>
- <method name="getScheduledReplicationBlocks" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Blocks scheduled for replication
- @return - num of blocks scheduled for replication]]>
- </doc>
- </method>
- <method name="getTotalLoad" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Total Load on the FSNamesystem
- @return - total load of FSNamesystem]]>
- </doc>
- </method>
- <method name="numLiveDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of Live data nodes
- @return number of live data nodes]]>
- </doc>
- </method>
- <method name="numDeadDataNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Number of dead data nodes
- @return number of dead data nodes]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This interface defines the methods to get the status of the FSNamesystem of
- a name node.
- It is also used for publishing via JMX (hence we follow the JMX naming
- convention.)
-
- Note we have not used the MetricsDynamicMBeanBase to implement this
- because the interface for the NameNodeStateMBean is stable and should
- be published as an interface.
-
- <p>
- Name Node runtime activity statistic info is reported in another MBean
- @see org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivityMBean]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMBean -->
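- <!-- A minimal sketch (not part of the generated API listing) of reading this MBean's
-      attributes through JMX from inside the name-node JVM. The object name used below is an
-      assumption and is not stated in this file; attribute names follow the usual
-      getter-to-attribute JMX convention (getFSState() becomes "FSState", and so on).
-
-        import java.lang.management.ManagementFactory;
-        import javax.management.MBeanServer;
-        import javax.management.ObjectName;
-
-        public class FsNamesystemStateReader {
-          public static void main(String[] args) throws Exception {
-            MBeanServer server = ManagementFactory.getPlatformMBeanServer();
-            ObjectName name = new ObjectName("hadoop:service=NameNode,name=FSNamesystemState"); // assumed
-            String state  = (String) server.getAttribute(name, "FSState");      // getFSState()
-            Long   blocks = (Long)   server.getAttribute(name, "BlocksTotal");  // getBlocksTotal()
-            Long   used   = (Long)   server.getAttribute(name, "CapacityUsed"); // getCapacityUsed()
-            System.out.println(state + ", blocks=" + blocks + ", used=" + used);
-          }
-        }
- -->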
- <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
- <class name="FSNamesystemMetrics" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.metrics.Updater"/>
- <constructor name="FSNamesystemMetrics" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="doUpdates"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
- <doc>
- <![CDATA[Since this object is a registered updater, this method will be called
- periodically, e.g. every 5 seconds.
- We set the metrics value within this function before pushing it out.
- FSNamesystem updates its own local variables which are
- light weight compared to Metrics counters.
- Some of the metrics are explicitly cast to int, since a few metrics collectors
- do not handle long values. It is safe to cast to int for now as all these
- values fit in an int.
- Metrics related to DFS capacity are stored in bytes, which do not fit in an
- int, so they are rounded to GB.]]>
- </doc>
- </method>
- <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="filesTotal" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blocksTotal" type="org.apache.hadoop.metrics.util.MetricsLongValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="capacityTotalGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="capacityUsedGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="capacityRemainingGB" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="totalLoad" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="pendingReplicationBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="underReplicatedBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="scheduledReplicationBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="missingBlocks" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class is for maintaining the various FSNamesystem status metrics
- and publishing them through the metrics interfaces.
- The FSNamesystem creates and registers the JMX MBean.
- <p>
- This class has a number of metrics variables that are publicly accessible;
- these variables (objects) have methods to update their values;
- for example:
- <p> {@link #filesTotal}.set()]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.FSNamesystemMetrics -->
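- <!-- A minimal sketch (not part of the generated API listing) expanding the
-      {@link #filesTotal}.set() example from the class doc above. The helper method and its
-      values are placeholders; only fields and types listed in this file are used.
-
-        static void publishNamespaceCounts(FSNamesystemMetrics metrics,
-                                           long numFilesAndDirs, long numBlocks) {
-          metrics.filesTotal.set((int) numFilesAndDirs);  // MetricsIntValue, hence the int cast
-          metrics.blocksTotal.set(numBlocks);             // MetricsLongValue takes a long
-          // doUpdates(...) is later invoked by the metrics framework to push these values out.
-        }
- -->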
- <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivtyMBean -->
- <class name="NameNodeActivtyMBean" extends="org.apache.hadoop.metrics.util.MetricsDynamicMBeanBase"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="NameNodeActivtyMBean" type="org.apache.hadoop.metrics.util.MetricsRegistry"
- static="false" final="false" visibility="protected"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <doc>
- <![CDATA[This is the JMX MBean for reporting the NameNode Activity.
- The MBean is registered using the name
- "hadoop:service=NameNode,name=NameNodeActivity"
-
- Many of the activity metrics are sampled and averaged on an interval
- which can be specified in the metrics config file.
- <p>
- For the metrics that are sampled and averaged, one must specify
- a metrics context that does periodic update calls. Most metrics contexts do.
- The default Null metrics context however does NOT. So if you aren't
- using any other metrics context then you can turn on the viewing and averaging
- of sampled metrics by specifying the following two lines
- in the hadoop-metrics.properties file:
- <pre>
- dfs.class=org.apache.hadoop.metrics.spi.NullContextWithUpdateThread
- dfs.period=10
- </pre>
- <p>
- Note that the metrics are collected regardless of the context used.
- The context with the update thread is used to average the data periodically.
- Implementation details: we use a dynamic MBean that gets the list of metrics
- from the metrics registry passed as an argument to the constructor.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeActivtyMBean -->
- <!-- start class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
- <class name="NameNodeMetrics" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.metrics.Updater"/>
- <constructor name="NameNodeMetrics" type="org.apache.hadoop.conf.Configuration, org.apache.hadoop.hdfs.server.namenode.NameNode"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="shutdown"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="doUpdates"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="unused" type="org.apache.hadoop.metrics.MetricsContext"/>
- <doc>
- <![CDATA[Since this object is a registered updater, this method will be called
- periodically, e.g. every 5 seconds.]]>
- </doc>
- </method>
- <method name="resetAllMinMax"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <field name="registry" type="org.apache.hadoop.metrics.util.MetricsRegistry"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesCreated" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesAppended" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numGetBlockLocations" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numFilesRenamed" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numGetListingOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numCreateFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numDeleteFileOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numAddBlockOps" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="transactions" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="syncs" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="transactionsBatchedInSync" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingInt"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="blockReport" type="org.apache.hadoop.metrics.util.MetricsTimeVaryingRate"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="safeModeTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="fsImageLoadTime" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="numBlocksCorrupted" type="org.apache.hadoop.metrics.util.MetricsIntValue"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This class is for maintaining the various NameNode activity statistics
- and publishing them through the metrics interfaces.
- This also registers the JMX MBean for RPC.
- <p>
- This class has a number of metrics variables that are publicly accessible;
- these variables (objects) have methods to update their values;
- for example:
- <p> {@link #syncs}.inc()]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.namenode.metrics.NameNodeMetrics -->
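- <!-- A minimal sketch (not part of the generated API listing) expanding the
-      {@link #syncs}.inc() example from the class doc above. The inc(long) overload on
-      MetricsTimeVaryingRate and the no-arg inc() on MetricsTimeVaryingInt are assumptions
-      about the metrics util classes; the helper methods are placeholders.
-
-        static void recordSync(NameNodeMetrics metrics, long startMillis) {
-          long elapsed = System.currentTimeMillis() - startMillis;
-          metrics.syncs.inc(elapsed);          // one edit-log sync and how long it took
-          metrics.transactions.inc(elapsed);   // same pattern for logged transactions
-        }
-
-        static void recordFileCreated(NameNodeMetrics metrics) {
-          metrics.numFilesCreated.inc();       // simple time-varying counter
-          metrics.numCreateFileOps.inc();
-        }
- -->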
- </package>
- <package name="org.apache.hadoop.hdfs.server.protocol">
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
- <class name="BlockCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="BlockCommand" type="int, java.util.List"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create BlockCommand for transferring blocks to another datanode
- @param blocktargetlist blocks to be transferred]]>
- </doc>
- </constructor>
- <constructor name="BlockCommand" type="int, org.apache.hadoop.hdfs.protocol.Block[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create BlockCommand for the given action
- @param blocks blocks related to the action]]>
- </doc>
- </constructor>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.protocol.Block[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getTargets" return="org.apache.hadoop.hdfs.protocol.DatanodeInfo[][]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[A BlockCommand is an instruction to a datanode
- regarding some blocks under its control. It tells
- the DataNode to either invalidate a set of indicated
- blocks, or to copy a set of indicated blocks to
- another DataNode.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockCommand -->
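- <!-- A minimal sketch (not part of the generated API listing) of building a BlockCommand with
-      the (int, Block[]) constructor listed above, using an action code defined on
-      DatanodeProtocol later in this file; the block values and the no-arg Block constructor
-      are placeholders/assumptions.
-
-        Block[] toInvalidate = new Block[] { new Block() };  // org.apache.hadoop.hdfs.protocol.Block
-        BlockCommand cmd = new BlockCommand(DatanodeProtocol.DNA_INVALIDATE, toInvalidate);
-        // On the datanode side, cmd.getAction() selects transfer vs. invalidate, and
-        // cmd.getBlocks() / cmd.getTargets() carry the blocks and destination datanodes.
- -->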
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo -->
- <class name="BlockMetaDataInfo" extends="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="BlockMetaDataInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="BlockMetaDataInfo" type="org.apache.hadoop.hdfs.protocol.Block, long"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getLastScanTime" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <doc>
- <![CDATA[Meta data information for a block]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
- <class name="BlocksWithLocations" extends="java.lang.Object"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="BlocksWithLocations" type="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Constructor with one parameter]]>
- </doc>
- </constructor>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[getter]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[serialization method]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[deserialization method]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A class to implement an array of BlockLocations.
- It provides efficient, customized serialization/deserialization methods
- instead of using the default array (de)serialization provided by RPC.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
- <class name="BlocksWithLocations.BlockWithLocations" extends="java.lang.Object"
- abstract="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="BlocksWithLocations.BlockWithLocations"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[default constructor]]>
- </doc>
- </constructor>
- <constructor name="BlocksWithLocations.BlockWithLocations" type="org.apache.hadoop.hdfs.protocol.Block, java.lang.String[]"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[constructor]]>
- </doc>
- </constructor>
- <method name="getBlock" return="org.apache.hadoop.hdfs.protocol.Block"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get the block]]>
- </doc>
- </method>
- <method name="getDatanodes" return="java.lang.String[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[get the block's locations]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[deserialization method]]>
- </doc>
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[serialization method]]>
- </doc>
- </method>
- <doc>
- <![CDATA[A class to keep track of a block and its locations]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations.BlockWithLocations -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
- <class name="DatanodeCommand" extends="java.lang.Object"
- abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="DatanodeCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getAction" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="REGISTER" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="FINALIZE" type="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeCommand -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
- <interface name="DatanodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="register" return="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Register Datanode.
- @see org.apache.hadoop.hdfs.server.datanode.DataNode#dnRegistration
- @see org.apache.hadoop.hdfs.server.namenode.FSNamesystem#registerDatanode(DatanodeRegistration)
-
- @return updated {@link org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration}, which contains
- new storageID if the datanode did not have one and
- registration ID for further communication.]]>
- </doc>
- </method>
- <method name="sendHeartbeat" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand[]"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="capacity" type="long"/>
- <param name="dfsUsed" type="long"/>
- <param name="remaining" type="long"/>
- <param name="xmitsInProgress" type="int"/>
- <param name="xceiverCount" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[sendHeartbeat() tells the NameNode that the DataNode is still
- alive and well. Includes some status info, too.
- It also gives the NameNode a chance to return
- an array of "DatanodeCommand" objects.
- A DatanodeCommand tells the DataNode to invalidate local block(s),
- or to copy them to other DataNodes, etc.]]>
- </doc>
- </method>
- <method name="blockReport" return="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="long[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[blockReport() tells the NameNode about all the locally-stored blocks.
- The NameNode returns an array of Blocks that have become obsolete
- and should be deleted. This function is meant to upload *all*
- the locally-stored blocks. It's invoked upon startup and then
- infrequently afterwards.
- @param registration
- @param blocks - the block list as an array of longs.
- Each block is represented as 2 longs.
- This is done instead of Block[] to reduce memory used by block reports.
-
- @return - the next command for DN to process.
- @throws IOException]]>
- </doc>
- </method>
- <method name="blockReceived"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.Block[]"/>
- <param name="delHints" type="java.lang.String[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[blockReceived() allows the DataNode to tell the NameNode about
- recently-received block data, with a hint for the preferred replica
- to be deleted when there are excess blocks.
- For example, whenever client code
- writes a new Block here, or another DataNode copies a Block to
- this DataNode, it will call blockReceived().]]>
- </doc>
- </method>
- <method name="errorReport"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="registration" type="org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration"/>
- <param name="errorCode" type="int"/>
- <param name="msg" type="java.lang.String"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[errorReport() tells the NameNode about something that has gone
- awry. Useful for debugging.]]>
- </doc>
- </method>
- <method name="versionRequest" return="org.apache.hadoop.hdfs.server.protocol.NamespaceInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="processUpgradeCommand" return="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="comm" type="org.apache.hadoop.hdfs.server.protocol.UpgradeCommand"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[This is a very general way to send a command to the name-node during
- the distributed upgrade process.
-
- The command is this general because the variety of upgrade commands is unpredictable.
- The reply from the name-node is also received in the form of an upgrade
- command.
-
- @return a reply in the form of an upgrade command]]>
- </doc>
- </method>
- <method name="reportBadBlocks"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="blocks" type="org.apache.hadoop.hdfs.protocol.LocatedBlock[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Same as {@link org.apache.hadoop.hdfs.protocol.ClientProtocol#reportBadBlocks(LocatedBlock[])}.]]>
- </doc>
- </method>
- <method name="nextGenerationStamp" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@return the next GenerationStamp to be associated with the specified
- block.]]>
- </doc>
- </method>
- <method name="commitBlockSynchronization"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newgenerationstamp" type="long"/>
- <param name="newlength" type="long"/>
- <param name="closeFile" type="boolean"/>
- <param name="deleteblock" type="boolean"/>
- <param name="newtargets" type="org.apache.hadoop.hdfs.protocol.DatanodeID[]"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Commit block synchronization in lease recovery]]>
- </doc>
- </method>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[19: sendHeartbeat() returns an array of DatanodeCommand objects
- instead of a single DatanodeCommand object.]]>
- </doc>
- </field>
- <field name="NOTIFY" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DISK_ERROR" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="INVALID_BLOCK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_UNKNOWN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Determines the actions that a data node should perform
- when receiving a datanode command.]]>
- </doc>
- </field>
- <field name="DNA_TRANSFER" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_INVALIDATE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_SHUTDOWN" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_REGISTER" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_FINALIZE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="DNA_RECOVERBLOCK" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[Protocol that a DFS datanode uses to communicate with the NameNode.
- It's used to upload current load information and block reports.
- The only way a NameNode can communicate with a DataNode is by
- returning values from these functions.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.DatanodeProtocol -->
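- <!-- A minimal sketch (not part of the generated API listing) of the datanode side of this
-      protocol: register once, then heartbeat and act on any returned commands. "namenode" is
-      assumed to be an RPC proxy for DatanodeProtocol obtained elsewhere, and the resource
-      numbers and sleep interval are placeholders.
-
-        static void heartbeatLoop(DatanodeProtocol namenode, DatanodeRegistration reg,
-                                  long capacity, long dfsUsed, long remaining)
-            throws IOException, InterruptedException {
-          reg = namenode.register(reg);  // may assign a new storageID / registration ID
-          while (true) {
-            DatanodeCommand[] cmds = namenode.sendHeartbeat(reg, capacity, dfsUsed, remaining,
-                                                            0 /* xmits */, 0 /* xceivers */);
-            if (cmds != null) {
-              for (DatanodeCommand cmd : cmds) {
-                if (cmd.getAction() == DatanodeProtocol.DNA_INVALIDATE) {
-                  // delete the blocks listed in the command
-                } else if (cmd.getAction() == DatanodeProtocol.DNA_TRANSFER) {
-                  // copy the listed blocks to the target datanodes
-                }
-              }
-            }
-            Thread.sleep(3000);  // heartbeat interval (placeholder)
-          }
-        }
- -->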
- <!-- start class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
- <class name="DatanodeRegistration" extends="org.apache.hadoop.hdfs.protocol.DatanodeID"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="DatanodeRegistration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Default constructor.]]>
- </doc>
- </constructor>
- <constructor name="DatanodeRegistration" type="java.lang.String"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Create DatanodeRegistration]]>
- </doc>
- </constructor>
- <method name="setInfoPort"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="infoPort" type="int"/>
- </method>
- <method name="setIpcPort"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="ipcPort" type="int"/>
- </method>
- <method name="setStorageInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="storage" type="org.apache.hadoop.hdfs.server.datanode.DataStorage"/>
- </method>
- <method name="setName"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="name" type="java.lang.String"/>
- </method>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getRegistrationID" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="toString" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[{@inheritDoc}]]>
- </doc>
- </method>
- <field name="storageInfo" type="org.apache.hadoop.hdfs.server.common.StorageInfo"
- transient="false" volatile="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[DatanodeRegistration class contains all information the Namenode needs
- to identify and verify a Datanode when it contacts the Namenode.
- This information is sent by Datanode with each communication request.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.DatanodeRegistration -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
- <class name="DisallowedDatanodeException" extends="java.io.IOException"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DisallowedDatanodeException" type="org.apache.hadoop.hdfs.protocol.DatanodeID"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <doc>
- <![CDATA[This exception is thrown when a datanode tries to register or communicate
- with the namenode when it does not appear on the list of included nodes,
- or has been specifically excluded.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.DisallowedDatanodeException -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
- <interface name="InterDatanodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="getBlockMetaDataInfo" return="org.apache.hadoop.hdfs.server.protocol.BlockMetaDataInfo"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="block" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[@return the BlockMetaDataInfo of a block;
- null if the block is not found]]>
- </doc>
- </method>
- <method name="updateBlock"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="oldblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="newblock" type="org.apache.hadoop.hdfs.protocol.Block"/>
- <param name="finalize" type="boolean"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Update the block to the new generation stamp and length.]]>
- </doc>
- </method>
- <field name="LOG" type="org.apache.commons.logging.Log"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[3: added a finalize parameter to updateBlock]]>
- </doc>
- </field>
- <doc>
- <![CDATA[An inter-datanode protocol for updating generation stamp]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.InterDatanodeProtocol -->
- <!-- start interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
- <interface name="NamenodeProtocol" abstract="true"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.ipc.VersionedProtocol"/>
- <method name="getBlocks" return="org.apache.hadoop.hdfs.server.protocol.BlocksWithLocations"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="datanode" type="org.apache.hadoop.hdfs.protocol.DatanodeInfo"/>
- <param name="size" type="long"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get a list of blocks belonging to <code>datanode</code>
- whose total size is equal to <code>size</code>
- @param datanode a data node
- @param size requested size
- @return a list of blocks & their locations
- @throws RemoteException if size is less than or equal to 0 or
- datanode does not exist]]>
- </doc>
- </method>
- <method name="getEditLogSize" return="long"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Get the size of the current edit log (in bytes).
- @return The number of bytes in the current edit log.
- @throws IOException]]>
- </doc>
- </method>
- <method name="rollEditLog" return="org.apache.hadoop.hdfs.server.namenode.CheckpointSignature"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Closes the current edit log and opens a new one. The
- call fails if the file system is in SafeMode.
- @throws IOException
- @return a unique token to identify this transaction.]]>
- </doc>
- </method>
- <method name="rollFsImage"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Rolls the fsImage log. It removes the old fsImage, copies the
- new image to fsImage, removes the old edits and renames edits.new
- to edits. The call fails if any of the four files are missing.
- @throws IOException]]>
- </doc>
- </method>
- <field name="versionID" type="long"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[2: Added getEditLogSize(), rollEditLog(), rollFSImage().]]>
- </doc>
- </field>
- <doc>
- <![CDATA[Protocol that a secondary NameNode uses to communicate with the NameNode.
- It's used to get part of the name node state.]]>
- </doc>
- </interface>
- <!-- end interface org.apache.hadoop.hdfs.server.protocol.NamenodeProtocol -->
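- <!-- A minimal sketch (not part of the generated API listing) of one checkpoint cycle driven
-      over this protocol, as the SecondaryNameNode does; "namenode" is assumed to be an RPC
-      proxy for NamenodeProtocol, and the image/edits transfer and merge steps are elided.
-
-        static void checkpointOnce(NamenodeProtocol namenode, long editsSizeThreshold)
-            throws IOException {
-          long editsSize = namenode.getEditLogSize();       // bytes in the current edit log
-          if (editsSize < editsSizeThreshold) {
-            return;                                         // nothing worth checkpointing yet
-          }
-          CheckpointSignature sig = namenode.rollEditLog(); // close edits, start edits.new;
-                                                            // sig identifies this transaction
-          // ... fetch fsimage and edits, merge them locally, upload the merged image ...
-          namenode.rollFsImage();                           // promote new image, rename edits.new
-        }
- -->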
- <!-- start class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
- <class name="NamespaceInfo" extends="org.apache.hadoop.hdfs.server.common.StorageInfo"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.io.Writable"/>
- <constructor name="NamespaceInfo"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="NamespaceInfo" type="int, long, int"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getBuildVersion" return="java.lang.String"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getDistributedUpgradeVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <doc>
- <![CDATA[NamespaceInfo is returned by the name-node in reply
- to a data-node handshake.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.NamespaceInfo -->
- <!-- start class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
- <class name="UpgradeCommand" extends="org.apache.hadoop.hdfs.server.protocol.DatanodeCommand"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="UpgradeCommand"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <constructor name="UpgradeCommand" type="int, int, short"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </constructor>
- <method name="getVersion" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="getCurrentStatus" return="short"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- </method>
- <method name="write"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="out" type="java.io.DataOutput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <method name="readFields"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="in" type="java.io.DataInput"/>
- <exception name="IOException" type="java.io.IOException"/>
- </method>
- <field name="UC_ACTION_REPORT_STATUS" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <field name="UC_ACTION_START_UPGRADE" type="int"
- transient="false" volatile="false"
- static="true" final="true" visibility="public"
- deprecated="not deprecated">
- </field>
- <doc>
- <![CDATA[This is a generic distributed upgrade command.
-
- During the upgrade, cluster components send upgrade commands to each other
- in order to obtain or share information.
- Each upgrade is expected to define its own specific upgrade commands by
- deriving them from this class.
- The upgrade command contains the version of the upgrade, which is verified
- on the receiving side, and the current status of the upgrade.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.server.protocol.UpgradeCommand -->
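- <!-- A minimal usage sketch for the class above, relying only on the members listed in this
-      record. The assumption that the UpgradeCommand(int, int, short) constructor takes an action
-      code, an upgrade version, and a status (in that order) is not stated here and should be
-      verified against the source.
-
-        import org.apache.hadoop.hdfs.server.protocol.UpgradeCommand;
-
-        // Report the status of a hypothetical distributed upgrade at version 18, 42 percent complete.
-        UpgradeCommand report =
-            new UpgradeCommand(UpgradeCommand.UC_ACTION_REPORT_STATUS, 18, (short) 42);
-
-        // The receiving side checks the upgrade version and reads the current status.
-        int version = report.getVersion();
-        short status = report.getCurrentStatus();
- -->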
- </package>
- <package name="org.apache.hadoop.hdfs.tools">
- <!-- start class org.apache.hadoop.hdfs.tools.DFSAdmin -->
- <class name="DFSAdmin" extends="org.apache.hadoop.fs.FsShell"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <constructor name="DFSAdmin"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Construct a DFSAdmin object.]]>
- </doc>
- </constructor>
- <constructor name="DFSAdmin" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <doc>
- <![CDATA[Construct a DFSAdmin object.]]>
- </doc>
- </constructor>
- <method name="report"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Gives a report on how the FileSystem is doing.
- @exception IOException if the filesystem does not exist.]]>
- </doc>
- </method>
- <method name="setSafeMode"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Safe mode maintenance command.
- Usage: java DFSAdmin -safemode [enter | leave | get]
- @param argv List of command line parameters.
- @param idx The index of the command that is being processed.
- @exception IOException if the filesystem does not exist.]]>
- </doc>
- </method>
- <method name="saveNamespace" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to ask the namenode to save the namespace.
- Usage: java DFSAdmin -saveNamespace
- @exception IOException
- @see org.apache.hadoop.hdfs.protocol.ClientProtocol#saveNamespace()]]>
- </doc>
- </method>
- <method name="refreshNodes" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to ask the namenode to reread the hosts and excluded hosts
- files.
- Usage: java DFSAdmin -refreshNodes
- @exception IOException]]>
- </doc>
- </method>
- <method name="finalizeUpgrade" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to ask the namenode to finalize a previously performed upgrade.
- Usage: java DFSAdmin -finalizeUpgrade
- @exception IOException]]>
- </doc>
- </method>
- <method name="upgradeProgress" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Command to request current distributed upgrade status,
- a detailed status, or to force the upgrade to proceed.
-
- Usage: java DFSAdmin -upgradeProgress [status | details | force]
- @exception IOException]]>
- </doc>
- </method>
- <method name="metaSave" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <param name="idx" type="int"/>
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Dumps DFS data structures into a specified file.
- Usage: java DFSAdmin -metasave filename
- @param argv List of command line parameters.
- @param idx The index of the command that is being processed.
- @exception IOException if an error occurred while accessing
- the file or path.]]>
- </doc>
- </method>
- <method name="refreshServiceAcl" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="IOException" type="java.io.IOException"/>
- <doc>
- <![CDATA[Refresh the authorization policy on the {@link NameNode}.
- @return exitcode 0 on success, non-zero on failure
- @throws IOException]]>
- </doc>
- </method>
- <method name="run" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[@param argv The parameters passed to this program.
- @exception Exception if the filesystem does not exist.
- @return 0 on success, non zero on error.]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="argv" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[main() has some simple utility methods.
- @param argv Command line parameters.
- @exception Exception if the filesystem does not exist.]]>
- </doc>
- </method>
- <doc>
- <![CDATA[This class provides some DFS administrative access.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.DFSAdmin -->
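- <!-- A minimal programmatic usage sketch, assuming only the DFSAdmin(Configuration) constructor
-      and the run(String[]) method listed above. The "-safemode get" argument form follows the
-      usage string documented for setSafeMode(); the driver class name is hypothetical.
-
-        import org.apache.hadoop.conf.Configuration;
-        import org.apache.hadoop.hdfs.tools.DFSAdmin;
-
-        public class SafeModeCheck {
-          public static void main(String[] args) throws Exception {
-            Configuration conf = new Configuration();   // loads the default Hadoop configuration
-            DFSAdmin admin = new DFSAdmin(conf);
-            int exitCode = admin.run(new String[] {"-safemode", "get"});
-            System.exit(exitCode);                      // 0 on success, non-zero on error
-          }
-        }
- -->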
- <!-- start class org.apache.hadoop.hdfs.tools.DFSck -->
- <class name="DFSck" extends="org.apache.hadoop.conf.Configured"
- abstract="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <implements name="org.apache.hadoop.util.Tool"/>
- <constructor name="DFSck" type="org.apache.hadoop.conf.Configuration"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[Filesystem checker.
- @param conf current Configuration
- @throws Exception]]>
- </doc>
- </constructor>
- <method name="run" return="int"
- abstract="false" native="false" synchronized="false"
- static="false" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- <doc>
- <![CDATA[@param args]]>
- </doc>
- </method>
- <method name="main"
- abstract="false" native="false" synchronized="false"
- static="true" final="false" visibility="public"
- deprecated="not deprecated">
- <param name="args" type="java.lang.String[]"/>
- <exception name="Exception" type="java.lang.Exception"/>
- </method>
- <doc>
- <![CDATA[This class provides rudimentary checking of DFS volumes for errors and
- sub-optimal conditions.
- <p>The tool scans all files and directories, starting from an indicated
- root path. The following abnormal conditions are detected and handled:</p>
- <ul>
- <li>files with blocks that are completely missing from all datanodes.<br/>
- In this case the tool can perform one of the following actions:
- <ul>
- <li>none ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_NONE})</li>
- <li>move corrupted files to /lost+found directory on DFS
- ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_MOVE}). Remaining data blocks are saved as
- block chains, representing the longest consecutive series of valid blocks.</li>
- <li>delete corrupted files ({@link org.apache.hadoop.hdfs.server.namenode.NamenodeFsck#FIXING_DELETE})</li>
- </ul>
- </li>
- <li>detect files with under-replicated or over-replicated blocks</li>
- </ul>
- Additionally, the tool collects detailed overall DFS statistics, and
- can optionally print detailed statistics on block locations and replication
- factors of each file.
- The tool also provides an option to filter open files during the scan.]]>
- </doc>
- </class>
- <!-- end class org.apache.hadoop.hdfs.tools.DFSck -->
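- <!-- A minimal programmatic usage sketch, assuming only the DFSck(Configuration) constructor,
-      the Tool interface it implements, and the run(String[]) method listed above. The "/" path
-      argument is one example of the indicated root path mentioned in the description; the driver
-      class name is hypothetical.
-
-        import org.apache.hadoop.conf.Configuration;
-        import org.apache.hadoop.hdfs.tools.DFSck;
-        import org.apache.hadoop.util.ToolRunner;
-
-        public class FsckRoot {
-          public static void main(String[] args) throws Exception {
-            Configuration conf = new Configuration();
-            // ToolRunner handles generic Hadoop options before delegating to DFSck.run().
-            int exitCode = ToolRunner.run(new DFSck(conf), new String[] {"/"});
-            System.exit(exitCode);
-          }
-        }
- -->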
- </package>
- </api>