hdfs.c 73 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042204320442045204620472048204920502051205220532054205520562057205820592060206120622063206420652066206720682069207020712072207320742075207620772078207920802081208220832084208520862087208820892090209120922093209420952096209720982099210021012102210321042105210621072108210921102111211221132114211521162117211821192120212121222123212421252126212721282129213021312132213321342135213621372138213921402141214221432144214521462147214821492150215121522153215421552156215721582159216021612162216321642165216621672168216921702171217221732174217521762177217821792180218121822183218421852186218721882189219021912192219321942195219621972198219922002201220222032204220522062207220822092210221122122213221422152216221722182219222022212222222322242225222622272228222922302231223222332234223522362237223822392240224122422243224422452246224722482249225022512252225322542255225622572258225922602261226222632264226522662267226822692270227122722273227422752276227
722782279228022812282228322842285228622872288228922902291229222932294229522962297229822992300230123022303230423052306230723082309231023112312231323142315231623172318231923202321232223232324232523262327232823292330233123322333233423352336233723382339234023412342234323442345234623472348234923502351235223532354235523562357235823592360236123622363236423652366236723682369237023712372237323742375237623772378237923802381238223832384238523862387238823892390239123922393239423952396239723982399240024012402240324042405240624072408240924102411241224132414241524162417241824192420242124222423242424252426242724282429243024312432243324342435243624372438243924402441244224432444244524462447244824492450245124522453245424552456245724582459246024612462246324642465246624672468246924702471247224732474247524762477247824792480248124822483248424852486248724882489249024912492249324942495249624972498249925002501250225032504250525062507250825092510251125122513251425152516251725182519252025212522252325242525252625272528252925302531253225332534253525362537253825392540254125422543254425452546254725482549255025512552255325542555255625572558255925602561
  1. /**
  2. * Licensed to the Apache Software Foundation (ASF) under one
  3. * or more contributor license agreements. See the NOTICE file
  4. * distributed with this work for additional information
  5. * regarding copyright ownership. The ASF licenses this file
  6. * to you under the Apache License, Version 2.0 (the
  7. * "License"); you may not use this file except in compliance
  8. * with the License. You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
  18. #include "exception.h"
  19. #include "hdfs.h"
  20. #include "jni_helper.h"
  21. #include <inttypes.h>
  22. #include <stdio.h>
  23. #include <string.h>
  24. /* Some frequently used Java paths */
  25. #define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
  26. #define HADOOP_PATH "org/apache/hadoop/fs/Path"
  27. #define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
  28. #define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
  29. #define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
  30. #define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
  31. #define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
  32. #define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
  33. #define HADOOP_OSTRM "org/apache/hadoop/fs/FSDataOutputStream"
  34. #define HADOOP_STAT "org/apache/hadoop/fs/FileStatus"
  35. #define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
  36. #define JAVA_NET_ISA "java/net/InetSocketAddress"
  37. #define JAVA_NET_URI "java/net/URI"
  38. #define JAVA_STRING "java/lang/String"
  39. #define JAVA_VOID "V"
  40. /* Macros for constructing method signatures */
  41. #define JPARAM(X) "L" X ";"
  42. #define JARRPARAM(X) "[L" X ";"
  43. #define JMETHOD1(X, R) "(" X ")" R
  44. #define JMETHOD2(X, Y, R) "(" X Y ")" R
  45. #define JMETHOD3(X, Y, Z, R) "(" X Y Z")" R
  46. #define KERBEROS_TICKET_CACHE_PATH "hadoop.security.kerberos.ticket.cache.path"
  47. // Bit fields for hdfsFile_internal flags
  48. #define HDFS_FILE_SUPPORTS_DIRECT_READ (1<<0)
  49. tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length);
  50. static void hdfsFreeFileInfoEntry(hdfsFileInfo *hdfsFileInfo);
/**
 * The C equivalent of org.apache.hadoop.fs.FSData(Input|Output)Stream.
 *
 * Records which kind of Java stream an hdfsFile handle wraps.
 */
enum hdfsStreamType
{
    UNINITIALIZED = 0,  // no Java stream bound to the handle yet
    INPUT = 1,          // handle wraps an FSDataInputStream
    OUTPUT = 2,         // handle wraps an FSDataOutputStream
};
/**
 * The 'file-handle' to a file in hdfs.
 */
struct hdfsFile_internal {
    void* file;               // the Java stream object (presumably a JNI
                              // reference -- confirm against open/close code)
    enum hdfsStreamType type; // INPUT, OUTPUT, or UNINITIALIZED
    int flags;                // bit field; see HDFS_FILE_SUPPORTS_DIRECT_READ
};
  68. int hdfsFileIsOpenForRead(hdfsFile file)
  69. {
  70. return (file->type == INPUT);
  71. }
  72. int hdfsFileIsOpenForWrite(hdfsFile file)
  73. {
  74. return (file->type == OUTPUT);
  75. }
  76. int hdfsFileUsesDirectRead(hdfsFile file)
  77. {
  78. return !!(file->flags & HDFS_FILE_SUPPORTS_DIRECT_READ);
  79. }
  80. void hdfsFileDisableDirectRead(hdfsFile file)
  81. {
  82. file->flags &= ~HDFS_FILE_SUPPORTS_DIRECT_READ;
  83. }
/**
 * hdfsJniEnv: A wrapper struct to be used as 'value'
 * while saving thread -> JNIEnv* mappings
 */
typedef struct
{
    JNIEnv* env;  // the JNI environment belonging to one thread
} hdfsJniEnv;
  92. /**
  93. * Helper function to create a org.apache.hadoop.fs.Path object.
  94. * @param env: The JNIEnv pointer.
  95. * @param path: The file-path for which to construct org.apache.hadoop.fs.Path
  96. * object.
  97. * @return Returns a jobject on success and NULL on error.
  98. */
  99. static jthrowable constructNewObjectOfPath(JNIEnv *env, const char *path,
  100. jobject *out)
  101. {
  102. jthrowable jthr;
  103. jstring jPathString;
  104. jobject jPath;
  105. //Construct a java.lang.String object
  106. jthr = newJavaStr(env, path, &jPathString);
  107. if (jthr)
  108. return jthr;
  109. //Construct the org.apache.hadoop.fs.Path object
  110. jthr = constructNewObjectOfClass(env, &jPath, "org/apache/hadoop/fs/Path",
  111. "(Ljava/lang/String;)V", jPathString);
  112. destroyLocalReference(env, jPathString);
  113. if (jthr)
  114. return jthr;
  115. *out = jPath;
  116. return NULL;
  117. }
  118. /**
  119. * Set a configuration value.
  120. *
  121. * @param env The JNI environment
  122. * @param jConfiguration The configuration object to modify
  123. * @param key The key to modify
  124. * @param value The value to set the key to
  125. *
  126. * @return NULL on success; exception otherwise
  127. */
  128. static jthrowable hadoopConfSetStr(JNIEnv *env, jobject jConfiguration,
  129. const char *key, const char *value)
  130. {
  131. jthrowable jthr;
  132. jstring jkey = NULL, jvalue = NULL;
  133. jthr = newJavaStr(env, key, &jkey);
  134. if (jthr)
  135. goto done;
  136. jthr = newJavaStr(env, value, &jvalue);
  137. if (jthr)
  138. goto done;
  139. jthr = invokeMethod(env, NULL, INSTANCE, jConfiguration,
  140. HADOOP_CONF, "set", JMETHOD2(JPARAM(JAVA_STRING),
  141. JPARAM(JAVA_STRING), JAVA_VOID),
  142. jkey, jvalue);
  143. if (jthr)
  144. goto done;
  145. done:
  146. destroyLocalReference(env, jkey);
  147. destroyLocalReference(env, jvalue);
  148. return jthr;
  149. }
/**
 * Get a configuration string value.
 *
 * @param env            The JNI environment
 * @param jConfiguration The configuration object to query
 * @param key            The key to look up
 * @param val            (out param) the value converted to a C string via
 *                       newCStr
 *
 * @return NULL on success; exception otherwise
 */
static jthrowable hadoopConfGetStr(JNIEnv *env, jobject jConfiguration,
                                   const char *key, char **val)
{
    jthrowable jthr;
    jvalue jVal;
    jstring jkey = NULL, jRet = NULL;

    jthr = newJavaStr(env, key, &jkey);
    if (jthr)
        goto done;
    // Call Configuration#get(String) : String
    jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
            HADOOP_CONF, "get", JMETHOD1(JPARAM(JAVA_STRING),
                                         JPARAM(JAVA_STRING)), jkey);
    if (jthr)
        goto done;
    jRet = jVal.l;
    // Convert the returned java.lang.String into a C string for the caller.
    jthr = newCStr(env, jRet, val);
done:
    // Local refs are released on every path, including errors.
    destroyLocalReference(env, jkey);
    destroyLocalReference(env, jRet);
    return jthr;
}
/**
 * Look up a configuration string using a freshly constructed default
 * Configuration object.
 *
 * @param key The configuration key to look up.
 * @param val (out param) the value; release with hdfsConfStrFree.
 * @return    0 on success; nonzero error code otherwise (errno is also set).
 */
int hdfsConfGetStr(const char *key, char **val)
{
    JNIEnv *env;
    int ret;
    jthrowable jthr;
    jobject jConfiguration = NULL;

    env = getJNIEnv();
    if (env == NULL) {
        ret = EINTERNAL;
        goto done;
    }
    // jConfiguration = new Configuration();
    jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsConfGetStr(%s): new Configuration", key);
        goto done;
    }
    jthr = hadoopConfGetStr(env, jConfiguration, key, val);
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsConfGetStr(%s): hadoopConfGetStr", key);
        goto done;
    }
    ret = 0;
done:
    // Safe when env failed to initialize: jConfiguration is still NULL then.
    destroyLocalReference(env, jConfiguration);
    if (ret)
        errno = ret;
    return ret;
}
  201. void hdfsConfStrFree(char *val)
  202. {
  203. free(val);
  204. }
/**
 * Get a configuration integer value.
 *
 * @param env            The JNI environment
 * @param jConfiguration The configuration object to query
 * @param key            The key to look up
 * @param val            (in/out param) on entry, the default value passed to
 *                       Configuration#getInt; on success, the result
 *
 * @return NULL on success; exception otherwise
 */
static jthrowable hadoopConfGetInt(JNIEnv *env, jobject jConfiguration,
                                   const char *key, int32_t *val)
{
    jthrowable jthr = NULL;
    jvalue jVal;
    jstring jkey = NULL;

    jthr = newJavaStr(env, key, &jkey);
    if (jthr)
        return jthr;
    // Call Configuration#getInt(String, int) : int, with *val as the default.
    jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
            HADOOP_CONF, "getInt", JMETHOD2(JPARAM(JAVA_STRING), "I", "I"),
            jkey, (jint)(*val));
    destroyLocalReference(env, jkey);
    if (jthr)
        return jthr;
    *val = jVal.i;
    return NULL;
}
/**
 * Look up a configuration integer using a freshly constructed default
 * Configuration object.
 *
 * @param key The configuration key to look up.
 * @param val (in/out param) on entry, the default value; on success,
 *            the configured value.
 * @return    0 on success; nonzero error code otherwise (errno is also set).
 */
int hdfsConfGetInt(const char *key, int32_t *val)
{
    JNIEnv *env;
    int ret;
    jobject jConfiguration = NULL;
    jthrowable jthr;

    env = getJNIEnv();
    if (env == NULL) {
        ret = EINTERNAL;
        goto done;
    }
    // jConfiguration = new Configuration();
    jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsConfGetInt(%s): new Configuration", key);
        goto done;
    }
    jthr = hadoopConfGetInt(env, jConfiguration, key, val);
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsConfGetInt(%s): hadoopConfGetInt", key);
        goto done;
    }
    ret = 0;
done:
    // Safe when env failed to initialize: jConfiguration is still NULL then.
    destroyLocalReference(env, jConfiguration);
    if (ret)
        errno = ret;
    return ret;
}
/**
 * Connection options accumulated by the hdfsBuilderSet* functions and
 * consumed by hdfsBuilderConnect.
 *
 * NOTE: the setters store the caller's pointers without copying, so the
 * strings must remain valid until hdfsBuilderConnect is called.
 */
struct hdfsBuilder {
    int forceNewInstance;            // nonzero: bypass the FileSystem cache
    const char *nn;                  // namenode host or URI; NULL = local FS
    tPort port;                      // namenode port; 0 = take port from nn
    const char *kerbTicketCachePath; // Kerberos ticket cache path, or NULL
    const char *userName;            // user to connect as, or NULL
};
  260. struct hdfsBuilder *hdfsNewBuilder(void)
  261. {
  262. struct hdfsBuilder *bld = calloc(1, sizeof(struct hdfsBuilder));
  263. if (!bld) {
  264. errno = ENOMEM;
  265. return NULL;
  266. }
  267. return bld;
  268. }
  269. void hdfsFreeBuilder(struct hdfsBuilder *bld)
  270. {
  271. free(bld);
  272. }
  273. void hdfsBuilderSetForceNewInstance(struct hdfsBuilder *bld)
  274. {
  275. bld->forceNewInstance = 1;
  276. }
  277. void hdfsBuilderSetNameNode(struct hdfsBuilder *bld, const char *nn)
  278. {
  279. bld->nn = nn;
  280. }
  281. void hdfsBuilderSetNameNodePort(struct hdfsBuilder *bld, tPort port)
  282. {
  283. bld->port = port;
  284. }
  285. void hdfsBuilderSetUserName(struct hdfsBuilder *bld, const char *userName)
  286. {
  287. bld->userName = userName;
  288. }
  289. void hdfsBuilderSetKerbTicketCachePath(struct hdfsBuilder *bld,
  290. const char *kerbTicketCachePath)
  291. {
  292. bld->kerbTicketCachePath = kerbTicketCachePath;
  293. }
  294. hdfsFS hdfsConnect(const char* host, tPort port)
  295. {
  296. struct hdfsBuilder *bld = hdfsNewBuilder();
  297. if (!bld)
  298. return NULL;
  299. hdfsBuilderSetNameNode(bld, host);
  300. hdfsBuilderSetNameNodePort(bld, port);
  301. return hdfsBuilderConnect(bld);
  302. }
  303. /** Always return a new FileSystem handle */
  304. hdfsFS hdfsConnectNewInstance(const char* host, tPort port)
  305. {
  306. struct hdfsBuilder *bld = hdfsNewBuilder();
  307. if (!bld)
  308. return NULL;
  309. hdfsBuilderSetNameNode(bld, host);
  310. hdfsBuilderSetNameNodePort(bld, port);
  311. hdfsBuilderSetForceNewInstance(bld);
  312. return hdfsBuilderConnect(bld);
  313. }
  314. hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user)
  315. {
  316. struct hdfsBuilder *bld = hdfsNewBuilder();
  317. if (!bld)
  318. return NULL;
  319. hdfsBuilderSetNameNode(bld, host);
  320. hdfsBuilderSetNameNodePort(bld, port);
  321. hdfsBuilderSetUserName(bld, user);
  322. return hdfsBuilderConnect(bld);
  323. }
  324. /** Always return a new FileSystem handle */
  325. hdfsFS hdfsConnectAsUserNewInstance(const char* host, tPort port,
  326. const char *user)
  327. {
  328. struct hdfsBuilder *bld = hdfsNewBuilder();
  329. if (!bld)
  330. return NULL;
  331. hdfsBuilderSetNameNode(bld, host);
  332. hdfsBuilderSetNameNodePort(bld, port);
  333. hdfsBuilderSetForceNewInstance(bld);
  334. hdfsBuilderSetUserName(bld, user);
  335. return hdfsBuilderConnect(bld);
  336. }
  337. /**
  338. * Calculate the effective URI to use, given a builder configuration.
  339. *
  340. * If there is not already a URI scheme, we prepend 'hdfs://'.
  341. *
  342. * If there is not already a port specified, and a port was given to the
  343. * builder, we suffix that port. If there is a port specified but also one in
  344. * the URI, that is an error.
  345. *
  346. * @param bld The hdfs builder object
  347. * @param uri (out param) dynamically allocated string representing the
  348. * effective URI
  349. *
  350. * @return 0 on success; error code otherwise
  351. */
  352. static int calcEffectiveURI(struct hdfsBuilder *bld, char ** uri)
  353. {
  354. const char *scheme;
  355. char suffix[64];
  356. const char *lastColon;
  357. char *u;
  358. size_t uriLen;
  359. if (!bld->nn)
  360. return EINVAL;
  361. scheme = (strstr(bld->nn, "://")) ? "" : "hdfs://";
  362. if (bld->port == 0) {
  363. suffix[0] = '\0';
  364. } else {
  365. lastColon = rindex(bld->nn, ':');
  366. if (lastColon && (strspn(lastColon + 1, "0123456789") ==
  367. strlen(lastColon + 1))) {
  368. fprintf(stderr, "port %d was given, but URI '%s' already "
  369. "contains a port!\n", bld->port, bld->nn);
  370. return EINVAL;
  371. }
  372. snprintf(suffix, sizeof(suffix), ":%d", bld->port);
  373. }
  374. uriLen = strlen(scheme) + strlen(bld->nn) + strlen(suffix);
  375. u = malloc((uriLen + 1) * (sizeof(char)));
  376. if (!u) {
  377. fprintf(stderr, "calcEffectiveURI: out of memory");
  378. return ENOMEM;
  379. }
  380. snprintf(u, uriLen + 1, "%s%s%s", scheme, bld->nn, suffix);
  381. *uri = u;
  382. return 0;
  383. }
  384. static const char *maybeNull(const char *str)
  385. {
  386. return str ? str : "(NULL)";
  387. }
/**
 * Render a builder's settings into buf, for use in error messages.
 *
 * @param bld    The builder to describe.
 * @param buf    The output buffer.
 * @param bufLen The buffer length; the description is truncated to fit.
 * @return       buf itself.
 */
static const char *hdfsBuilderToStr(const struct hdfsBuilder *bld,
                                    char *buf, size_t bufLen)
{
    // NOTE(review): bld->port is printed with %d -- assumes tPort promotes
    // to int; confirm against the tPort typedef in hdfs.h.
    snprintf(buf, bufLen, "forceNewInstance=%d, nn=%s, port=%d, "
             "kerbTicketCachePath=%s, userName=%s",
             bld->forceNewInstance, maybeNull(bld->nn), bld->port,
             maybeNull(bld->kerbTicketCachePath), maybeNull(bld->userName));
    return buf;
}
/**
 * Connect to the filesystem described by the builder.
 *
 * Consumes the builder: bld is always freed before returning, on success
 * and on every error path.
 *
 * @param bld The builder (from hdfsNewBuilder plus the setter functions).
 * @return    A filesystem handle (a JNI global ref cast to hdfsFS) on
 *            success; NULL on error with errno set.
 */
hdfsFS hdfsBuilderConnect(struct hdfsBuilder *bld)
{
    JNIEnv *env = 0;
    // NOTE(review): jCachePath is declared and destroyed below but never
    // assigned anywhere in this function.
    jobject jConfiguration = NULL, jFS = NULL, jURI = NULL, jCachePath = NULL;
    jstring jURIString = NULL, jUserString = NULL;
    jvalue jVal;
    jthrowable jthr = NULL;
    char *cURI = 0, buf[512];
    int ret;
    jobject jRet = NULL;

    // Get the JNIEnv* corresponding to current thread
    env = getJNIEnv();
    if (env == NULL) {
        ret = EINTERNAL;
        goto done;
    }
    // jConfiguration = new Configuration();
    jthr = constructNewObjectOfClass(env, &jConfiguration, HADOOP_CONF, "()V");
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsBuilderConnect(%s)", hdfsBuilderToStr(bld, buf, sizeof(buf)));
        goto done;
    }
    // Check what type of FileSystem the caller wants...
    if (bld->nn == NULL) {
        // No namenode given: get a local filesystem.
        if (bld->forceNewInstance) {
            // fs = FileSystem#newInstanceLocal(conf);
            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
                    "newInstanceLocal", JMETHOD1(JPARAM(HADOOP_CONF),
                    JPARAM(HADOOP_LOCALFS)), jConfiguration);
            if (jthr) {
                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                    "hdfsBuilderConnect(%s)",
                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
                goto done;
            }
            jFS = jVal.l;
        } else {
            // fs = FileSystem#getLocal(conf);
            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "getLocal",
                    JMETHOD1(JPARAM(HADOOP_CONF),
                             JPARAM(HADOOP_LOCALFS)),
                    jConfiguration);
            if (jthr) {
                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                    "hdfsBuilderConnect(%s)",
                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
                goto done;
            }
            jFS = jVal.l;
        }
    } else {
        if (!strcmp(bld->nn, "default")) {
            // "default" means: take the URI from the configuration.
            // jURI = FileSystem.getDefaultUri(conf)
            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
                    "getDefaultUri",
                    "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
                    jConfiguration);
            if (jthr) {
                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                    "hdfsBuilderConnect(%s)",
                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
                goto done;
            }
            jURI = jVal.l;
        } else {
            // Build "hdfs://host:port" from the builder and parse it.
            // fs = FileSystem#get(URI, conf, ugi);
            ret = calcEffectiveURI(bld, &cURI);
            if (ret)
                goto done;
            jthr = newJavaStr(env, cURI, &jURIString);
            if (jthr) {
                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                    "hdfsBuilderConnect(%s)",
                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
                goto done;
            }
            // jURI = URI.create(uriString)
            jthr = invokeMethod(env, &jVal, STATIC, NULL, JAVA_NET_URI,
                    "create", "(Ljava/lang/String;)Ljava/net/URI;",
                    jURIString);
            if (jthr) {
                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                    "hdfsBuilderConnect(%s)",
                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
                goto done;
            }
            jURI = jVal.l;
        }
        // Propagate the Kerberos ticket cache path into the configuration
        // before the FileSystem is constructed.
        if (bld->kerbTicketCachePath) {
            jthr = hadoopConfSetStr(env, jConfiguration,
                KERBEROS_TICKET_CACHE_PATH, bld->kerbTicketCachePath);
            if (jthr) {
                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                    "hdfsBuilderConnect(%s)",
                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
                goto done;
            }
        }
        jthr = newJavaStr(env, bld->userName, &jUserString);
        if (jthr) {
            ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                "hdfsBuilderConnect(%s)",
                hdfsBuilderToStr(bld, buf, sizeof(buf)));
            goto done;
        }
        if (bld->forceNewInstance) {
            // fs = FileSystem#newInstance(uri, conf, user)
            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS,
                    "newInstance", JMETHOD3(JPARAM(JAVA_NET_URI),
                        JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
                        JPARAM(HADOOP_FS)),
                    jURI, jConfiguration, jUserString);
            if (jthr) {
                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                    "hdfsBuilderConnect(%s)",
                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
                goto done;
            }
            jFS = jVal.l;
        } else {
            // fs = FileSystem#get(uri, conf, user) -- may return a cached FS
            jthr = invokeMethod(env, &jVal, STATIC, NULL, HADOOP_FS, "get",
                    JMETHOD3(JPARAM(JAVA_NET_URI), JPARAM(HADOOP_CONF),
                        JPARAM(JAVA_STRING), JPARAM(HADOOP_FS)),
                    jURI, jConfiguration, jUserString);
            if (jthr) {
                ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
                    "hdfsBuilderConnect(%s)",
                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
                goto done;
            }
            jFS = jVal.l;
        }
    }
    // Promote the FileSystem to a global ref so it outlives this JNI frame.
    jRet = (*env)->NewGlobalRef(env, jFS);
    if (!jRet) {
        ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
                    "hdfsBuilderConnect(%s)",
                    hdfsBuilderToStr(bld, buf, sizeof(buf)));
        goto done;
    }
    ret = 0;
done:
    // Release unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jFS);
    destroyLocalReference(env, jURI);
    destroyLocalReference(env, jCachePath);
    destroyLocalReference(env, jURIString);
    destroyLocalReference(env, jUserString);
    free(cURI);
    // The builder is consumed on every path.
    free(bld);
    if (ret) {
        errno = ret;
        return NULL;
    }
    return (hdfsFS)jRet;
}
/**
 * Disconnect from the filesystem and invalidate the handle.
 *
 * JAVA EQUIVALENT: fs.close()
 *
 * @param fs The handle returned by hdfsConnect/hdfsBuilderConnect.
 * @return   0 on success; -1 on error with errno set.
 */
int hdfsDisconnect(hdfsFS fs)
{
    // Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    int ret;
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }
    // Parameters
    jobject jFS = (jobject)fs;
    // Sanity check
    if (fs == NULL) {
        errno = EBADF;
        return -1;
    }
    // Call FileSystem#close().
    jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
            "close", "()V");
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsDisconnect: FileSystem#close");
    } else {
        ret = 0;
    }
    // The global ref is dropped even when close() threw, so the handle is
    // always invalid after this call returns.
    (*env)->DeleteGlobalRef(env, jFS);
    if (ret) {
        errno = ret;
        return -1;
    }
    return 0;
}
  587. hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
  588. int bufferSize, short replication, tSize blockSize)
  589. {
  590. /*
  591. JAVA EQUIVALENT:
  592. File f = new File(path);
  593. FSData{Input|Output}Stream f{is|os} = fs.create(f);
  594. return f{is|os};
  595. */
  596. /* Get the JNIEnv* corresponding to current thread */
  597. JNIEnv* env = getJNIEnv();
  598. int accmode = flags & O_ACCMODE;
  599. if (env == NULL) {
  600. errno = EINTERNAL;
  601. return NULL;
  602. }
  603. jstring jStrBufferSize = NULL, jStrReplication = NULL;
  604. jstring jStrBlockSize = NULL;
  605. jobject jConfiguration = NULL, jPath = NULL, jFile = NULL;
  606. jobject jFS = (jobject)fs;
  607. jthrowable jthr;
  608. jvalue jVal;
  609. hdfsFile file = NULL;
  610. int ret;
  611. if (accmode == O_RDONLY || accmode == O_WRONLY) {
  612. /* yay */
  613. } else if (accmode == O_RDWR) {
  614. fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
  615. errno = ENOTSUP;
  616. return NULL;
  617. } else {
  618. fprintf(stderr, "ERROR: cannot open an hdfs file in mode 0x%x\n", accmode);
  619. errno = EINVAL;
  620. return NULL;
  621. }
  622. if ((flags & O_CREAT) && (flags & O_EXCL)) {
  623. fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
  624. }
  625. /* The hadoop java api/signature */
  626. const char* method = NULL;
  627. const char* signature = NULL;
  628. if (accmode == O_RDONLY) {
  629. method = "open";
  630. signature = JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM));
  631. } else if (flags & O_APPEND) {
  632. method = "append";
  633. signature = JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM));
  634. } else {
  635. method = "create";
  636. signature = JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));
  637. }
  638. /* Create an object of org.apache.hadoop.fs.Path */
  639. jthr = constructNewObjectOfPath(env, path, &jPath);
  640. if (jthr) {
  641. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  642. "hdfsOpenFile(%s): constructNewObjectOfPath", path);
  643. goto done;
  644. }
  645. /* Get the Configuration object from the FileSystem object */
  646. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  647. "getConf", JMETHOD1("", JPARAM(HADOOP_CONF)));
  648. if (jthr) {
  649. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  650. "hdfsOpenFile(%s): FileSystem#getConf", path);
  651. goto done;
  652. }
  653. jConfiguration = jVal.l;
  654. jint jBufferSize = bufferSize;
  655. jshort jReplication = replication;
  656. jlong jBlockSize = blockSize;
  657. jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
  658. if (!jStrBufferSize) {
  659. ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, "OOM");
  660. goto done;
  661. }
  662. jStrReplication = (*env)->NewStringUTF(env, "dfs.replication");
  663. if (!jStrReplication) {
  664. ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, "OOM");
  665. goto done;
  666. }
  667. jStrBlockSize = (*env)->NewStringUTF(env, "dfs.block.size");
  668. if (!jStrBlockSize) {
  669. ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL, "OOM");
  670. goto done;
  671. }
  672. if (!bufferSize) {
  673. jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
  674. HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
  675. jStrBufferSize, 4096);
  676. if (jthr) {
  677. ret = printExceptionAndFree(env, jthr, NOPRINT_EXC_FILE_NOT_FOUND |
  678. NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_UNRESOLVED_LINK,
  679. "hdfsOpenFile(%s): Configuration#getInt(io.file.buffer.size)",
  680. path);
  681. goto done;
  682. }
  683. jBufferSize = jVal.i;
  684. }
  685. if ((accmode == O_WRONLY) && (flags & O_APPEND) == 0) {
  686. if (!replication) {
  687. jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
  688. HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
  689. jStrReplication, 1);
  690. if (jthr) {
  691. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  692. "hdfsOpenFile(%s): Configuration#getInt(dfs.replication)",
  693. path);
  694. goto done;
  695. }
  696. jReplication = jVal.i;
  697. }
  698. //blockSize
  699. if (!blockSize) {
  700. jthr = invokeMethod(env, &jVal, INSTANCE, jConfiguration,
  701. HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J",
  702. jStrBlockSize, (jlong)67108864);
  703. if (jthr) {
  704. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  705. "hdfsOpenFile(%s): Configuration#getLong(dfs.block.size)",
  706. path);
  707. goto done;
  708. }
  709. jBlockSize = jVal.j;
  710. }
  711. }
  712. /* Create and return either the FSDataInputStream or
  713. FSDataOutputStream references jobject jStream */
  714. // READ?
  715. if (accmode == O_RDONLY) {
  716. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  717. method, signature, jPath, jBufferSize);
  718. } else if ((accmode == O_WRONLY) && (flags & O_APPEND)) {
  719. // WRITE/APPEND?
  720. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  721. method, signature, jPath);
  722. } else {
  723. // WRITE/CREATE
  724. jboolean jOverWrite = 1;
  725. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  726. method, signature, jPath, jOverWrite,
  727. jBufferSize, jReplication, jBlockSize);
  728. }
  729. if (jthr) {
  730. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  731. "hdfsOpenFile(%s): FileSystem#%s(%s)", path, method, signature);
  732. goto done;
  733. }
  734. jFile = jVal.l;
  735. file = calloc(1, sizeof(struct hdfsFile_internal));
  736. if (!file) {
  737. fprintf(stderr, "hdfsOpenFile(%s): OOM create hdfsFile\n", path);
  738. ret = ENOMEM;
  739. goto done;
  740. }
  741. file->file = (*env)->NewGlobalRef(env, jFile);
  742. if (!file->file) {
  743. ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  744. "hdfsOpenFile(%s): NewGlobalRef", path);
  745. goto done;
  746. }
  747. file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);
  748. file->flags = 0;
  749. if ((flags & O_WRONLY) == 0) {
  750. // Try a test read to see if we can do direct reads
  751. char buf;
  752. if (readDirect(fs, file, &buf, 0) == 0) {
  753. // Success - 0-byte read should return 0
  754. file->flags |= HDFS_FILE_SUPPORTS_DIRECT_READ;
  755. } else if (errno != ENOTSUP) {
  756. // Unexpected error. Clear it, don't set the direct flag.
  757. fprintf(stderr,
  758. "hdfsOpenFile(%s): WARN: Unexpected error %d when testing "
  759. "for direct read compatibility\n", path, errno);
  760. }
  761. }
  762. ret = 0;
  763. done:
  764. destroyLocalReference(env, jStrBufferSize);
  765. destroyLocalReference(env, jStrReplication);
  766. destroyLocalReference(env, jStrBlockSize);
  767. destroyLocalReference(env, jConfiguration);
  768. destroyLocalReference(env, jPath);
  769. destroyLocalReference(env, jFile);
  770. if (ret) {
  771. if (file) {
  772. if (file->file) {
  773. (*env)->DeleteGlobalRef(env, file->file);
  774. }
  775. free(file);
  776. }
  777. errno = ret;
  778. return NULL;
  779. }
  780. return file;
  781. }
  782. int hdfsCloseFile(hdfsFS fs, hdfsFile file)
  783. {
  784. int ret;
  785. // JAVA EQUIVALENT:
  786. // file.close
  787. //Get the JNIEnv* corresponding to current thread
  788. JNIEnv* env = getJNIEnv();
  789. if (env == NULL) {
  790. errno = EINTERNAL;
  791. return -1;
  792. }
  793. //Caught exception
  794. jthrowable jthr;
  795. //Sanity check
  796. if (!file || file->type == UNINITIALIZED) {
  797. errno = EBADF;
  798. return -1;
  799. }
  800. //The interface whose 'close' method to be called
  801. const char* interface = (file->type == INPUT) ?
  802. HADOOP_ISTRM : HADOOP_OSTRM;
  803. jthr = invokeMethod(env, NULL, INSTANCE, file->file, interface,
  804. "close", "()V");
  805. if (jthr) {
  806. const char *interfaceShortName = (file->type == INPUT) ?
  807. "FSDataInputStream" : "FSDataOutputStream";
  808. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  809. "%s#close", interfaceShortName);
  810. } else {
  811. ret = 0;
  812. }
  813. //De-allocate memory
  814. (*env)->DeleteGlobalRef(env, file->file);
  815. free(file);
  816. if (ret) {
  817. errno = ret;
  818. return -1;
  819. }
  820. return 0;
  821. }
  822. int hdfsExists(hdfsFS fs, const char *path)
  823. {
  824. JNIEnv *env = getJNIEnv();
  825. if (env == NULL) {
  826. errno = EINTERNAL;
  827. return -1;
  828. }
  829. jobject jPath;
  830. jvalue jVal;
  831. jobject jFS = (jobject)fs;
  832. jthrowable jthr;
  833. if (path == NULL) {
  834. errno = EINVAL;
  835. return -1;
  836. }
  837. jthr = constructNewObjectOfPath(env, path, &jPath);
  838. if (jthr) {
  839. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  840. "hdfsExists: constructNewObjectOfPath");
  841. return -1;
  842. }
  843. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  844. "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"), jPath);
  845. destroyLocalReference(env, jPath);
  846. if (jthr) {
  847. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  848. "hdfsExists: invokeMethod(%s)",
  849. JMETHOD1(JPARAM(HADOOP_PATH), "Z"));
  850. return -1;
  851. }
  852. if (jVal.z) {
  853. return 0;
  854. } else {
  855. errno = ENOENT;
  856. return -1;
  857. }
  858. }
// Checks input file for readiness for reading.
//
// Validates that f is non-NULL, was actually opened (type != UNINITIALIZED),
// and refers to an input stream.  On success stores the file's underlying
// stream jobject into *jInputStream and returns 0.  On failure sets errno
// (EBADF or EINVAL) and returns -1.
//
// Note: *jInputStream is assigned before validation, so it may hold NULL
// (or a stream of the wrong type) when -1 is returned; callers must only
// use it after a 0 return.
static int readPrepare(JNIEnv* env, hdfsFS fs, hdfsFile f,
        jobject* jInputStream)
{
    *jInputStream = (jobject)(f ? f->file : NULL);
    // Sanity check: NULL or never-opened handles are not readable.
    if (!f || f->type == UNINITIALIZED) {
        errno = EBADF;
        return -1;
    }
    // Error checking... make sure that this file is 'readable'.
    if (f->type != INPUT) {
        fprintf(stderr, "Cannot read from a non-InputStream object!\n");
        errno = EINVAL;
        return -1;
    }
    return 0;
}
  877. tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
  878. {
  879. if (length == 0) {
  880. return 0;
  881. } else if (length < 0) {
  882. errno = EINVAL;
  883. return -1;
  884. }
  885. if (f->flags & HDFS_FILE_SUPPORTS_DIRECT_READ) {
  886. return readDirect(fs, f, buffer, length);
  887. }
  888. // JAVA EQUIVALENT:
  889. // byte [] bR = new byte[length];
  890. // fis.read(bR);
  891. //Get the JNIEnv* corresponding to current thread
  892. JNIEnv* env = getJNIEnv();
  893. if (env == NULL) {
  894. errno = EINTERNAL;
  895. return -1;
  896. }
  897. //Parameters
  898. jobject jInputStream;
  899. if (readPrepare(env, fs, f, &jInputStream) == -1) {
  900. return -1;
  901. }
  902. jbyteArray jbRarray;
  903. jint noReadBytes = length;
  904. jvalue jVal;
  905. jthrowable jthr;
  906. //Read the requisite bytes
  907. jbRarray = (*env)->NewByteArray(env, length);
  908. if (!jbRarray) {
  909. errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  910. "hdfsRead: NewByteArray");
  911. return -1;
  912. }
  913. jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream, HADOOP_ISTRM,
  914. "read", "([B)I", jbRarray);
  915. if (jthr) {
  916. destroyLocalReference(env, jbRarray);
  917. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  918. "hdfsRead: FSDataInputStream#read");
  919. return -1;
  920. }
  921. if (jVal.i < 0) {
  922. // EOF
  923. destroyLocalReference(env, jbRarray);
  924. return 0;
  925. } else if (jVal.i == 0) {
  926. destroyLocalReference(env, jbRarray);
  927. errno = EINTR;
  928. return -1;
  929. }
  930. (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
  931. destroyLocalReference(env, jbRarray);
  932. if ((*env)->ExceptionCheck(env)) {
  933. errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  934. "hdfsRead: GetByteArrayRegion");
  935. return -1;
  936. }
  937. return jVal.i;
  938. }
  939. // Reads using the read(ByteBuffer) API, which does fewer copies
  940. tSize readDirect(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
  941. {
  942. // JAVA EQUIVALENT:
  943. // ByteBuffer bbuffer = ByteBuffer.allocateDirect(length) // wraps C buffer
  944. // fis.read(bbuffer);
  945. //Get the JNIEnv* corresponding to current thread
  946. JNIEnv* env = getJNIEnv();
  947. if (env == NULL) {
  948. errno = EINTERNAL;
  949. return -1;
  950. }
  951. jobject jInputStream;
  952. if (readPrepare(env, fs, f, &jInputStream) == -1) {
  953. return -1;
  954. }
  955. jvalue jVal;
  956. jthrowable jthr;
  957. //Read the requisite bytes
  958. jobject bb = (*env)->NewDirectByteBuffer(env, buffer, length);
  959. if (bb == NULL) {
  960. errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  961. "readDirect: NewDirectByteBuffer");
  962. return -1;
  963. }
  964. jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
  965. HADOOP_ISTRM, "read", "(Ljava/nio/ByteBuffer;)I", bb);
  966. destroyLocalReference(env, bb);
  967. if (jthr) {
  968. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  969. "readDirect: FSDataInputStream#read");
  970. return -1;
  971. }
  972. return (jVal.i < 0) ? 0 : jVal.i;
  973. }
  974. tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
  975. void* buffer, tSize length)
  976. {
  977. JNIEnv* env;
  978. jbyteArray jbRarray;
  979. jvalue jVal;
  980. jthrowable jthr;
  981. if (length == 0) {
  982. return 0;
  983. } else if (length < 0) {
  984. errno = EINVAL;
  985. return -1;
  986. }
  987. if (!f || f->type == UNINITIALIZED) {
  988. errno = EBADF;
  989. return -1;
  990. }
  991. env = getJNIEnv();
  992. if (env == NULL) {
  993. errno = EINTERNAL;
  994. return -1;
  995. }
  996. //Error checking... make sure that this file is 'readable'
  997. if (f->type != INPUT) {
  998. fprintf(stderr, "Cannot read from a non-InputStream object!\n");
  999. errno = EINVAL;
  1000. return -1;
  1001. }
  1002. // JAVA EQUIVALENT:
  1003. // byte [] bR = new byte[length];
  1004. // fis.read(pos, bR, 0, length);
  1005. jbRarray = (*env)->NewByteArray(env, length);
  1006. if (!jbRarray) {
  1007. errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  1008. "hdfsPread: NewByteArray");
  1009. return -1;
  1010. }
  1011. jthr = invokeMethod(env, &jVal, INSTANCE, f->file, HADOOP_ISTRM,
  1012. "read", "(J[BII)I", position, jbRarray, 0, length);
  1013. if (jthr) {
  1014. destroyLocalReference(env, jbRarray);
  1015. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1016. "hdfsPread: FSDataInputStream#read");
  1017. return -1;
  1018. }
  1019. if (jVal.i < 0) {
  1020. // EOF
  1021. destroyLocalReference(env, jbRarray);
  1022. return 0;
  1023. } else if (jVal.i == 0) {
  1024. destroyLocalReference(env, jbRarray);
  1025. errno = EINTR;
  1026. return -1;
  1027. }
  1028. (*env)->GetByteArrayRegion(env, jbRarray, 0, jVal.i, buffer);
  1029. destroyLocalReference(env, jbRarray);
  1030. if ((*env)->ExceptionCheck(env)) {
  1031. errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  1032. "hdfsPread: GetByteArrayRegion");
  1033. return -1;
  1034. }
  1035. return jVal.i;
  1036. }
  1037. tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
  1038. {
  1039. // JAVA EQUIVALENT
  1040. // byte b[] = str.getBytes();
  1041. // fso.write(b);
  1042. //Get the JNIEnv* corresponding to current thread
  1043. JNIEnv* env = getJNIEnv();
  1044. if (env == NULL) {
  1045. errno = EINTERNAL;
  1046. return -1;
  1047. }
  1048. //Sanity check
  1049. if (!f || f->type == UNINITIALIZED) {
  1050. errno = EBADF;
  1051. return -1;
  1052. }
  1053. jobject jOutputStream = f->file;
  1054. jbyteArray jbWarray;
  1055. jthrowable jthr;
  1056. if (length < 0) {
  1057. errno = EINVAL;
  1058. return -1;
  1059. }
  1060. //Error checking... make sure that this file is 'writable'
  1061. if (f->type != OUTPUT) {
  1062. fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
  1063. errno = EINVAL;
  1064. return -1;
  1065. }
  1066. if (length < 0) {
  1067. errno = EINVAL;
  1068. return -1;
  1069. }
  1070. if (length == 0) {
  1071. return 0;
  1072. }
  1073. //Write the requisite bytes into the file
  1074. jbWarray = (*env)->NewByteArray(env, length);
  1075. if (!jbWarray) {
  1076. errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  1077. "hdfsWrite: NewByteArray");
  1078. return -1;
  1079. }
  1080. (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
  1081. if ((*env)->ExceptionCheck(env)) {
  1082. destroyLocalReference(env, jbWarray);
  1083. errno = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  1084. "hdfsWrite(length = %d): SetByteArrayRegion", length);
  1085. return -1;
  1086. }
  1087. jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
  1088. HADOOP_OSTRM, "write", "([B)V", jbWarray);
  1089. destroyLocalReference(env, jbWarray);
  1090. if (jthr) {
  1091. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1092. "hdfsWrite: FSDataOutputStream#write");
  1093. return -1;
  1094. }
  1095. // Unlike most Java streams, FSDataOutputStream never does partial writes.
  1096. // If we succeeded, all the data was written.
  1097. return length;
  1098. }
  1099. int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
  1100. {
  1101. // JAVA EQUIVALENT
  1102. // fis.seek(pos);
  1103. //Get the JNIEnv* corresponding to current thread
  1104. JNIEnv* env = getJNIEnv();
  1105. if (env == NULL) {
  1106. errno = EINTERNAL;
  1107. return -1;
  1108. }
  1109. //Sanity check
  1110. if (!f || f->type != INPUT) {
  1111. errno = EBADF;
  1112. return -1;
  1113. }
  1114. jobject jInputStream = f->file;
  1115. jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jInputStream,
  1116. HADOOP_ISTRM, "seek", "(J)V", desiredPos);
  1117. if (jthr) {
  1118. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1119. "hdfsSeek(desiredPos=%" PRId64 ")"
  1120. ": FSDataInputStream#seek", desiredPos);
  1121. return -1;
  1122. }
  1123. return 0;
  1124. }
  1125. tOffset hdfsTell(hdfsFS fs, hdfsFile f)
  1126. {
  1127. // JAVA EQUIVALENT
  1128. // pos = f.getPos();
  1129. //Get the JNIEnv* corresponding to current thread
  1130. JNIEnv* env = getJNIEnv();
  1131. if (env == NULL) {
  1132. errno = EINTERNAL;
  1133. return -1;
  1134. }
  1135. //Sanity check
  1136. if (!f || f->type == UNINITIALIZED) {
  1137. errno = EBADF;
  1138. return -1;
  1139. }
  1140. //Parameters
  1141. jobject jStream = f->file;
  1142. const char* interface = (f->type == INPUT) ?
  1143. HADOOP_ISTRM : HADOOP_OSTRM;
  1144. jvalue jVal;
  1145. jthrowable jthr = invokeMethod(env, &jVal, INSTANCE, jStream,
  1146. interface, "getPos", "()J");
  1147. if (jthr) {
  1148. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1149. "hdfsTell: %s#getPos",
  1150. ((f->type == INPUT) ? "FSDataInputStream" :
  1151. "FSDataOutputStream"));
  1152. return -1;
  1153. }
  1154. return jVal.j;
  1155. }
  1156. int hdfsFlush(hdfsFS fs, hdfsFile f)
  1157. {
  1158. // JAVA EQUIVALENT
  1159. // fos.flush();
  1160. //Get the JNIEnv* corresponding to current thread
  1161. JNIEnv* env = getJNIEnv();
  1162. if (env == NULL) {
  1163. errno = EINTERNAL;
  1164. return -1;
  1165. }
  1166. //Sanity check
  1167. if (!f || f->type != OUTPUT) {
  1168. errno = EBADF;
  1169. return -1;
  1170. }
  1171. jthrowable jthr = invokeMethod(env, NULL, INSTANCE, f->file,
  1172. HADOOP_OSTRM, "flush", "()V");
  1173. if (jthr) {
  1174. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1175. "hdfsFlush: FSDataInputStream#flush");
  1176. return -1;
  1177. }
  1178. return 0;
  1179. }
  1180. int hdfsHFlush(hdfsFS fs, hdfsFile f)
  1181. {
  1182. //Get the JNIEnv* corresponding to current thread
  1183. JNIEnv* env = getJNIEnv();
  1184. if (env == NULL) {
  1185. errno = EINTERNAL;
  1186. return -1;
  1187. }
  1188. //Sanity check
  1189. if (!f || f->type != OUTPUT) {
  1190. errno = EBADF;
  1191. return -1;
  1192. }
  1193. jobject jOutputStream = f->file;
  1194. jthrowable jthr = invokeMethod(env, NULL, INSTANCE, jOutputStream,
  1195. HADOOP_OSTRM, "hflush", "()V");
  1196. if (jthr) {
  1197. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1198. "hdfsHFlush: FSDataOutputStream#hflush");
  1199. return -1;
  1200. }
  1201. return 0;
  1202. }
  1203. int hdfsAvailable(hdfsFS fs, hdfsFile f)
  1204. {
  1205. // JAVA EQUIVALENT
  1206. // fis.available();
  1207. //Get the JNIEnv* corresponding to current thread
  1208. JNIEnv* env = getJNIEnv();
  1209. if (env == NULL) {
  1210. errno = EINTERNAL;
  1211. return -1;
  1212. }
  1213. //Sanity check
  1214. if (!f || f->type != INPUT) {
  1215. errno = EBADF;
  1216. return -1;
  1217. }
  1218. //Parameters
  1219. jobject jInputStream = f->file;
  1220. jvalue jVal;
  1221. jthrowable jthr = invokeMethod(env, &jVal, INSTANCE, jInputStream,
  1222. HADOOP_ISTRM, "available", "()I");
  1223. if (jthr) {
  1224. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1225. "hdfsAvailable: FSDataInputStream#available");
  1226. return -1;
  1227. }
  1228. return jVal.i;
  1229. }
// Shared implementation behind hdfsCopy (deleteSource = 0) and
// hdfsMove (deleteSource = 1).
//
// JAVA EQUIVALENT:
//   FileUtil#copy(srcFS, srcPath, dstFS, dstPath, deleteSource, conf)
//
// Returns 0 on success, -1 with errno set on failure.  FileUtil#copy
// returning false (with no exception) is mapped to EIO.
static int hdfsCopyImpl(hdfsFS srcFS, const char* src, hdfsFS dstFS,
        const char* dst, jboolean deleteSource)
{
    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }
    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;
    jobject jConfiguration = NULL, jSrcPath = NULL, jDstPath = NULL;
    jthrowable jthr;
    jvalue jVal;
    int ret;
    jthr = constructNewObjectOfPath(env, src, &jSrcPath);
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsCopyImpl(src=%s): constructNewObjectOfPath", src);
        goto done;
    }
    jthr = constructNewObjectOfPath(env, dst, &jDstPath);
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsCopyImpl(dst=%s): constructNewObjectOfPath", dst);
        goto done;
    }
    // Create the org.apache.hadoop.conf.Configuration object passed to
    // FileUtil#copy (a fresh default configuration, not the FS's own).
    jthr = constructNewObjectOfClass(env, &jConfiguration,
        HADOOP_CONF, "()V");
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsCopyImpl: Configuration constructor");
        goto done;
    }
    //FileUtil#copy
    jthr = invokeMethod(env, &jVal, STATIC,
        NULL, "org/apache/hadoop/fs/FileUtil", "copy",
        "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
        "Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;"
        "ZLorg/apache/hadoop/conf/Configuration;)Z",
        jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
        jConfiguration);
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsCopyImpl(src=%s, dst=%s, deleteSource=%d): "
            "FileUtil#copy", src, dst, deleteSource);
        goto done;
    }
    // copy() returned false without throwing; nothing more specific than
    // EIO can be reported.
    if (!jVal.z) {
        ret = EIO;
        goto done;
    }
    ret = 0;
done:
    // destroyLocalReference tolerates NULL, so unconditionally clean up.
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);
    if (ret) {
        errno = ret;
        return -1;
    }
    return 0;
}
  1298. int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
  1299. {
  1300. return hdfsCopyImpl(srcFS, src, dstFS, dst, 0);
  1301. }
  1302. int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
  1303. {
  1304. return hdfsCopyImpl(srcFS, src, dstFS, dst, 1);
  1305. }
  1306. int hdfsDelete(hdfsFS fs, const char* path, int recursive)
  1307. {
  1308. // JAVA EQUIVALENT:
  1309. // Path p = new Path(path);
  1310. // bool retval = fs.delete(p, recursive);
  1311. //Get the JNIEnv* corresponding to current thread
  1312. JNIEnv* env = getJNIEnv();
  1313. if (env == NULL) {
  1314. errno = EINTERNAL;
  1315. return -1;
  1316. }
  1317. jobject jFS = (jobject)fs;
  1318. jthrowable jthr;
  1319. jobject jPath;
  1320. jvalue jVal;
  1321. jthr = constructNewObjectOfPath(env, path, &jPath);
  1322. if (jthr) {
  1323. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1324. "hdfsDelete(path=%s): constructNewObjectOfPath", path);
  1325. return -1;
  1326. }
  1327. jboolean jRecursive = recursive ? JNI_TRUE : JNI_FALSE;
  1328. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  1329. "delete", "(Lorg/apache/hadoop/fs/Path;Z)Z",
  1330. jPath, jRecursive);
  1331. destroyLocalReference(env, jPath);
  1332. if (jthr) {
  1333. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1334. "hdfsDelete(path=%s, recursive=%d): "
  1335. "FileSystem#delete", path, recursive);
  1336. return -1;
  1337. }
  1338. if (!jVal.z) {
  1339. errno = EIO;
  1340. return -1;
  1341. }
  1342. return 0;
  1343. }
  1344. int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
  1345. {
  1346. // JAVA EQUIVALENT:
  1347. // Path old = new Path(oldPath);
  1348. // Path new = new Path(newPath);
  1349. // fs.rename(old, new);
  1350. //Get the JNIEnv* corresponding to current thread
  1351. JNIEnv* env = getJNIEnv();
  1352. if (env == NULL) {
  1353. errno = EINTERNAL;
  1354. return -1;
  1355. }
  1356. jobject jFS = (jobject)fs;
  1357. jthrowable jthr;
  1358. jobject jOldPath = NULL, jNewPath = NULL;
  1359. int ret = -1;
  1360. jvalue jVal;
  1361. jthr = constructNewObjectOfPath(env, oldPath, &jOldPath );
  1362. if (jthr) {
  1363. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1364. "hdfsRename: constructNewObjectOfPath(%s)", oldPath);
  1365. goto done;
  1366. }
  1367. jthr = constructNewObjectOfPath(env, newPath, &jNewPath);
  1368. if (jthr) {
  1369. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1370. "hdfsRename: constructNewObjectOfPath(%s)", newPath);
  1371. goto done;
  1372. }
  1373. // Rename the file
  1374. // TODO: use rename2 here? (See HDFS-3592)
  1375. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS, "rename",
  1376. JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
  1377. jOldPath, jNewPath);
  1378. if (jthr) {
  1379. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1380. "hdfsRename(oldPath=%s, newPath=%s): FileSystem#rename",
  1381. oldPath, newPath);
  1382. goto done;
  1383. }
  1384. if (!jVal.z) {
  1385. errno = EIO;
  1386. goto done;
  1387. }
  1388. ret = 0;
  1389. done:
  1390. destroyLocalReference(env, jOldPath);
  1391. destroyLocalReference(env, jNewPath);
  1392. return ret;
  1393. }
  1394. char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
  1395. {
  1396. // JAVA EQUIVALENT:
  1397. // Path p = fs.getWorkingDirectory();
  1398. // return p.toString()
  1399. //Get the JNIEnv* corresponding to current thread
  1400. JNIEnv* env = getJNIEnv();
  1401. if (env == NULL) {
  1402. errno = EINTERNAL;
  1403. return NULL;
  1404. }
  1405. jobject jPath = NULL;
  1406. jstring jPathString = NULL;
  1407. jobject jFS = (jobject)fs;
  1408. jvalue jVal;
  1409. jthrowable jthr;
  1410. int ret;
  1411. const char *jPathChars = NULL;
  1412. //FileSystem#getWorkingDirectory()
  1413. jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
  1414. HADOOP_FS, "getWorkingDirectory",
  1415. "()Lorg/apache/hadoop/fs/Path;");
  1416. if (jthr) {
  1417. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1418. "hdfsGetWorkingDirectory: FileSystem#getWorkingDirectory");
  1419. goto done;
  1420. }
  1421. jPath = jVal.l;
  1422. if (!jPath) {
  1423. fprintf(stderr, "hdfsGetWorkingDirectory: "
  1424. "FileSystem#getWorkingDirectory returned NULL");
  1425. ret = -EIO;
  1426. goto done;
  1427. }
  1428. //Path#toString()
  1429. jthr = invokeMethod(env, &jVal, INSTANCE, jPath,
  1430. "org/apache/hadoop/fs/Path", "toString",
  1431. "()Ljava/lang/String;");
  1432. if (jthr) {
  1433. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1434. "hdfsGetWorkingDirectory: Path#toString");
  1435. goto done;
  1436. }
  1437. jPathString = jVal.l;
  1438. jPathChars = (*env)->GetStringUTFChars(env, jPathString, NULL);
  1439. if (!jPathChars) {
  1440. ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  1441. "hdfsGetWorkingDirectory: GetStringUTFChars");
  1442. goto done;
  1443. }
  1444. //Copy to user-provided buffer
  1445. ret = snprintf(buffer, bufferSize, "%s", jPathChars);
  1446. if (ret >= bufferSize) {
  1447. ret = ENAMETOOLONG;
  1448. goto done;
  1449. }
  1450. ret = 0;
  1451. done:
  1452. if (jPathChars) {
  1453. (*env)->ReleaseStringUTFChars(env, jPathString, jPathChars);
  1454. }
  1455. destroyLocalReference(env, jPath);
  1456. destroyLocalReference(env, jPathString);
  1457. if (ret) {
  1458. errno = ret;
  1459. return NULL;
  1460. }
  1461. return buffer;
  1462. }
  1463. int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
  1464. {
  1465. // JAVA EQUIVALENT:
  1466. // fs.setWorkingDirectory(Path(path));
  1467. //Get the JNIEnv* corresponding to current thread
  1468. JNIEnv* env = getJNIEnv();
  1469. if (env == NULL) {
  1470. errno = EINTERNAL;
  1471. return -1;
  1472. }
  1473. jobject jFS = (jobject)fs;
  1474. jthrowable jthr;
  1475. jobject jPath;
  1476. //Create an object of org.apache.hadoop.fs.Path
  1477. jthr = constructNewObjectOfPath(env, path, &jPath);
  1478. if (jthr) {
  1479. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1480. "hdfsSetWorkingDirectory(%s): constructNewObjectOfPath",
  1481. path);
  1482. return -1;
  1483. }
  1484. //FileSystem#setWorkingDirectory()
  1485. jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
  1486. "setWorkingDirectory",
  1487. "(Lorg/apache/hadoop/fs/Path;)V", jPath);
  1488. destroyLocalReference(env, jPath);
  1489. if (jthr) {
  1490. errno = printExceptionAndFree(env, jthr, NOPRINT_EXC_ILLEGAL_ARGUMENT,
  1491. "hdfsSetWorkingDirectory(%s): FileSystem#setWorkingDirectory",
  1492. path);
  1493. return -1;
  1494. }
  1495. return 0;
  1496. }
  1497. int hdfsCreateDirectory(hdfsFS fs, const char* path)
  1498. {
  1499. // JAVA EQUIVALENT:
  1500. // fs.mkdirs(new Path(path));
  1501. //Get the JNIEnv* corresponding to current thread
  1502. JNIEnv* env = getJNIEnv();
  1503. if (env == NULL) {
  1504. errno = EINTERNAL;
  1505. return -1;
  1506. }
  1507. jobject jFS = (jobject)fs;
  1508. jobject jPath;
  1509. jthrowable jthr;
  1510. //Create an object of org.apache.hadoop.fs.Path
  1511. jthr = constructNewObjectOfPath(env, path, &jPath);
  1512. if (jthr) {
  1513. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1514. "hdfsCreateDirectory(%s): constructNewObjectOfPath", path);
  1515. return -1;
  1516. }
  1517. //Create the directory
  1518. jvalue jVal;
  1519. jVal.z = 0;
  1520. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  1521. "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
  1522. jPath);
  1523. destroyLocalReference(env, jPath);
  1524. if (jthr) {
  1525. errno = printExceptionAndFree(env, jthr,
  1526. NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
  1527. NOPRINT_EXC_UNRESOLVED_LINK | NOPRINT_EXC_PARENT_NOT_DIRECTORY,
  1528. "hdfsCreateDirectory(%s): FileSystem#mkdirs", path);
  1529. return -1;
  1530. }
  1531. if (!jVal.z) {
  1532. // It's unclear under exactly which conditions FileSystem#mkdirs
  1533. // is supposed to return false (as opposed to throwing an exception.)
  1534. // It seems like the current code never actually returns false.
  1535. // So we're going to translate this to EIO, since there seems to be
  1536. // nothing more specific we can do with it.
  1537. errno = EIO;
  1538. return -1;
  1539. }
  1540. return 0;
  1541. }
  1542. int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
  1543. {
  1544. // JAVA EQUIVALENT:
  1545. // fs.setReplication(new Path(path), replication);
  1546. //Get the JNIEnv* corresponding to current thread
  1547. JNIEnv* env = getJNIEnv();
  1548. if (env == NULL) {
  1549. errno = EINTERNAL;
  1550. return -1;
  1551. }
  1552. jobject jFS = (jobject)fs;
  1553. jthrowable jthr;
  1554. //Create an object of org.apache.hadoop.fs.Path
  1555. jobject jPath;
  1556. jthr = constructNewObjectOfPath(env, path, &jPath);
  1557. if (jthr) {
  1558. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1559. "hdfsSetReplication(path=%s): constructNewObjectOfPath", path);
  1560. return -1;
  1561. }
  1562. //Create the directory
  1563. jvalue jVal;
  1564. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  1565. "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
  1566. jPath, replication);
  1567. destroyLocalReference(env, jPath);
  1568. if (jthr) {
  1569. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1570. "hdfsSetReplication(path=%s, replication=%d): "
  1571. "FileSystem#setReplication", path, replication);
  1572. return -1;
  1573. }
  1574. if (!jVal.z) {
  1575. // setReplication returns false "if file does not exist or is a
  1576. // directory." So the nearest translation to that is ENOENT.
  1577. errno = ENOENT;
  1578. return -1;
  1579. }
  1580. return 0;
  1581. }
// Changes the owner and/or group of the file at 'path'.
//
// JAVA EQUIVALENT:
//   fs.setOwner(path, owner, group)
//
// Either owner or group may be NULL; when both are NULL this is a no-op
// returning 0.  Returns 0 on success, -1 with errno set on failure.
int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
{
    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return -1;
    }
    // Nothing to do when neither owner nor group was supplied.
    if (owner == NULL && group == NULL) {
        return 0;
    }
    jobject jFS = (jobject)fs;
    jobject jPath = NULL;
    jstring jOwner = NULL, jGroup = NULL;
    jthrowable jthr;
    int ret;
    jthr = constructNewObjectOfPath(env, path, &jPath);
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsChown(path=%s): constructNewObjectOfPath", path);
        goto done;
    }
    // NOTE(review): assumes newJavaStr maps a NULL C string to a NULL
    // jstring (which setOwner treats as "leave unchanged") -- confirm
    // against its definition.
    jthr = newJavaStr(env, owner, &jOwner);
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsChown(path=%s): newJavaStr(%s)", path, owner);
        goto done;
    }
    jthr = newJavaStr(env, group, &jGroup);
    if (jthr) {
        ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
            "hdfsChown(path=%s): newJavaStr(%s)", path, group);
        goto done;
    }
    // Invoke FileSystem#setOwner(Path, String, String).
    jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
        "setOwner", JMETHOD3(JPARAM(HADOOP_PATH),
        JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
        jPath, jOwner, jGroup);
    if (jthr) {
        ret = printExceptionAndFree(env, jthr,
            NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
            NOPRINT_EXC_UNRESOLVED_LINK,
            "hdfsChown(path=%s, owner=%s, group=%s): "
            "FileSystem#setOwner", path, owner, group);
        goto done;
    }
    ret = 0;
done:
    // Local references tolerate NULL; clean up unconditionally.
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jOwner);
    destroyLocalReference(env, jGroup);
    if (ret) {
        errno = ret;
        return -1;
    }
    return 0;
}
  1642. int hdfsChmod(hdfsFS fs, const char* path, short mode)
  1643. {
  1644. int ret;
  1645. // JAVA EQUIVALENT:
  1646. // fs.setPermission(path, FsPermission)
  1647. //Get the JNIEnv* corresponding to current thread
  1648. JNIEnv* env = getJNIEnv();
  1649. if (env == NULL) {
  1650. errno = EINTERNAL;
  1651. return -1;
  1652. }
  1653. jthrowable jthr;
  1654. jobject jPath = NULL, jPermObj = NULL;
  1655. jobject jFS = (jobject)fs;
  1656. // construct jPerm = FsPermission.createImmutable(short mode);
  1657. jshort jmode = mode;
  1658. jthr = constructNewObjectOfClass(env, &jPermObj,
  1659. HADOOP_FSPERM,"(S)V",jmode);
  1660. if (jthr) {
  1661. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1662. "constructNewObjectOfClass(%s)", HADOOP_FSPERM);
  1663. return -1;
  1664. }
  1665. //Create an object of org.apache.hadoop.fs.Path
  1666. jthr = constructNewObjectOfPath(env, path, &jPath);
  1667. if (jthr) {
  1668. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1669. "hdfsChmod(%s): constructNewObjectOfPath", path);
  1670. goto done;
  1671. }
  1672. //Create the directory
  1673. jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
  1674. "setPermission",
  1675. JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM), JAVA_VOID),
  1676. jPath, jPermObj);
  1677. if (jthr) {
  1678. ret = printExceptionAndFree(env, jthr,
  1679. NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
  1680. NOPRINT_EXC_UNRESOLVED_LINK,
  1681. "hdfsChmod(%s): FileSystem#setPermission", path);
  1682. goto done;
  1683. }
  1684. ret = 0;
  1685. done:
  1686. destroyLocalReference(env, jPath);
  1687. destroyLocalReference(env, jPermObj);
  1688. if (ret) {
  1689. errno = ret;
  1690. return -1;
  1691. }
  1692. return 0;
  1693. }
  1694. int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
  1695. {
  1696. // JAVA EQUIVALENT:
  1697. // fs.setTimes(src, mtime, atime)
  1698. jthrowable jthr;
  1699. //Get the JNIEnv* corresponding to current thread
  1700. JNIEnv* env = getJNIEnv();
  1701. if (env == NULL) {
  1702. errno = EINTERNAL;
  1703. return -1;
  1704. }
  1705. jobject jFS = (jobject)fs;
  1706. //Create an object of org.apache.hadoop.fs.Path
  1707. jobject jPath;
  1708. jthr = constructNewObjectOfPath(env, path, &jPath);
  1709. if (jthr) {
  1710. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1711. "hdfsUtime(path=%s): constructNewObjectOfPath", path);
  1712. return -1;
  1713. }
  1714. const tTime NO_CHANGE = -1;
  1715. jlong jmtime = (mtime == NO_CHANGE) ? -1 : (mtime * (jlong)1000);
  1716. jlong jatime = (atime == NO_CHANGE) ? -1 : (atime * (jlong)1000);
  1717. jthr = invokeMethod(env, NULL, INSTANCE, jFS, HADOOP_FS,
  1718. "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
  1719. jPath, jmtime, jatime);
  1720. destroyLocalReference(env, jPath);
  1721. if (jthr) {
  1722. errno = printExceptionAndFree(env, jthr,
  1723. NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
  1724. NOPRINT_EXC_UNRESOLVED_LINK,
  1725. "hdfsUtime(path=%s): FileSystem#setTimes", path);
  1726. return -1;
  1727. }
  1728. return 0;
  1729. }
  1730. char***
  1731. hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
  1732. {
  1733. // JAVA EQUIVALENT:
  1734. // fs.getFileBlockLoctions(new Path(path), start, length);
  1735. jthrowable jthr;
  1736. jobject jPath = NULL;
  1737. jobject jFileStatus = NULL;
  1738. jvalue jFSVal, jVal;
  1739. jobjectArray jBlockLocations = NULL, jFileBlockHosts = NULL;
  1740. jstring jHost = NULL;
  1741. char*** blockHosts = NULL;
  1742. int i, j, ret;
  1743. jsize jNumFileBlocks = 0;
  1744. //Get the JNIEnv* corresponding to current thread
  1745. JNIEnv* env = getJNIEnv();
  1746. if (env == NULL) {
  1747. errno = EINTERNAL;
  1748. return NULL;
  1749. }
  1750. jobject jFS = (jobject)fs;
  1751. //Create an object of org.apache.hadoop.fs.Path
  1752. jthr = constructNewObjectOfPath(env, path, &jPath);
  1753. if (jthr) {
  1754. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1755. "hdfsGetHosts(path=%s): constructNewObjectOfPath", path);
  1756. goto done;
  1757. }
  1758. jthr = invokeMethod(env, &jFSVal, INSTANCE, jFS,
  1759. HADOOP_FS, "getFileStatus", "(Lorg/apache/hadoop/fs/Path;)"
  1760. "Lorg/apache/hadoop/fs/FileStatus;", jPath);
  1761. if (jthr) {
  1762. ret = printExceptionAndFree(env, jthr, NOPRINT_EXC_FILE_NOT_FOUND,
  1763. "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
  1764. "FileSystem#getFileStatus", path, start, length);
  1765. destroyLocalReference(env, jPath);
  1766. goto done;
  1767. }
  1768. jFileStatus = jFSVal.l;
  1769. //org.apache.hadoop.fs.FileSystem#getFileBlockLocations
  1770. jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
  1771. HADOOP_FS, "getFileBlockLocations",
  1772. "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
  1773. "[Lorg/apache/hadoop/fs/BlockLocation;",
  1774. jFileStatus, start, length);
  1775. if (jthr) {
  1776. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1777. "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
  1778. "FileSystem#getFileBlockLocations", path, start, length);
  1779. goto done;
  1780. }
  1781. jBlockLocations = jVal.l;
  1782. //Figure out no of entries in jBlockLocations
  1783. //Allocate memory and add NULL at the end
  1784. jNumFileBlocks = (*env)->GetArrayLength(env, jBlockLocations);
  1785. blockHosts = calloc(jNumFileBlocks + 1, sizeof(char**));
  1786. if (blockHosts == NULL) {
  1787. ret = ENOMEM;
  1788. goto done;
  1789. }
  1790. if (jNumFileBlocks == 0) {
  1791. ret = 0;
  1792. goto done;
  1793. }
  1794. //Now parse each block to get hostnames
  1795. for (i = 0; i < jNumFileBlocks; ++i) {
  1796. jobject jFileBlock =
  1797. (*env)->GetObjectArrayElement(env, jBlockLocations, i);
  1798. if (!jFileBlock) {
  1799. ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  1800. "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
  1801. "GetObjectArrayElement(%d)", path, start, length, i);
  1802. goto done;
  1803. }
  1804. jthr = invokeMethod(env, &jVal, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
  1805. "getHosts", "()[Ljava/lang/String;");
  1806. if (jthr) {
  1807. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1808. "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
  1809. "BlockLocation#getHosts", path, start, length);
  1810. goto done;
  1811. }
  1812. jFileBlockHosts = jVal.l;
  1813. if (!jFileBlockHosts) {
  1814. fprintf(stderr,
  1815. "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"):"
  1816. "BlockLocation#getHosts returned NULL", path, start, length);
  1817. ret = EINTERNAL;
  1818. goto done;
  1819. }
  1820. //Figure out no of hosts in jFileBlockHosts, and allocate the memory
  1821. jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
  1822. blockHosts[i] = calloc(jNumBlockHosts + 1, sizeof(char*));
  1823. if (!blockHosts[i]) {
  1824. ret = ENOMEM;
  1825. goto done;
  1826. }
  1827. //Now parse each hostname
  1828. const char *hostName;
  1829. for (j = 0; j < jNumBlockHosts; ++j) {
  1830. jHost = (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
  1831. if (!jHost) {
  1832. ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  1833. "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64"): "
  1834. "NewByteArray", path, start, length);
  1835. goto done;
  1836. }
  1837. hostName =
  1838. (const char*)((*env)->GetStringUTFChars(env, jHost, NULL));
  1839. if (!hostName) {
  1840. ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  1841. "hdfsGetHosts(path=%s, start=%"PRId64", length=%"PRId64", "
  1842. "j=%d out of %d): GetStringUTFChars",
  1843. path, start, length, j, jNumBlockHosts);
  1844. goto done;
  1845. }
  1846. blockHosts[i][j] = strdup(hostName);
  1847. (*env)->ReleaseStringUTFChars(env, jHost, hostName);
  1848. if (!blockHosts[i][j]) {
  1849. ret = ENOMEM;
  1850. goto done;
  1851. }
  1852. destroyLocalReference(env, jHost);
  1853. jHost = NULL;
  1854. }
  1855. destroyLocalReference(env, jFileBlockHosts);
  1856. jFileBlockHosts = NULL;
  1857. }
  1858. ret = 0;
  1859. done:
  1860. destroyLocalReference(env, jPath);
  1861. destroyLocalReference(env, jFileStatus);
  1862. destroyLocalReference(env, jBlockLocations);
  1863. destroyLocalReference(env, jFileBlockHosts);
  1864. destroyLocalReference(env, jHost);
  1865. if (ret) {
  1866. if (blockHosts) {
  1867. hdfsFreeHosts(blockHosts);
  1868. }
  1869. return NULL;
  1870. }
  1871. return blockHosts;
  1872. }
  1873. void hdfsFreeHosts(char ***blockHosts)
  1874. {
  1875. int i, j;
  1876. for (i=0; blockHosts[i]; i++) {
  1877. for (j=0; blockHosts[i][j]; j++) {
  1878. free(blockHosts[i][j]);
  1879. }
  1880. free(blockHosts[i]);
  1881. }
  1882. free(blockHosts);
  1883. }
  1884. tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
  1885. {
  1886. // JAVA EQUIVALENT:
  1887. // fs.getDefaultBlockSize();
  1888. //Get the JNIEnv* corresponding to current thread
  1889. JNIEnv* env = getJNIEnv();
  1890. if (env == NULL) {
  1891. errno = EINTERNAL;
  1892. return -1;
  1893. }
  1894. jobject jFS = (jobject)fs;
  1895. //FileSystem#getDefaultBlockSize()
  1896. jvalue jVal;
  1897. jthrowable jthr;
  1898. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  1899. "getDefaultBlockSize", "()J");
  1900. if (jthr) {
  1901. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1902. "hdfsGetDefaultBlockSize: FileSystem#getDefaultBlockSize");
  1903. return -1;
  1904. }
  1905. return jVal.j;
  1906. }
  1907. tOffset hdfsGetCapacity(hdfsFS fs)
  1908. {
  1909. // JAVA EQUIVALENT:
  1910. // FsStatus fss = fs.getStatus();
  1911. // return Fss.getCapacity();
  1912. //Get the JNIEnv* corresponding to current thread
  1913. JNIEnv* env = getJNIEnv();
  1914. if (env == NULL) {
  1915. errno = EINTERNAL;
  1916. return -1;
  1917. }
  1918. jobject jFS = (jobject)fs;
  1919. //FileSystem#getStatus
  1920. jvalue jVal;
  1921. jthrowable jthr;
  1922. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  1923. "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
  1924. if (jthr) {
  1925. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1926. "hdfsGetCapacity: FileSystem#getStatus");
  1927. return -1;
  1928. }
  1929. jobject fss = (jobject)jVal.l;
  1930. jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
  1931. "getCapacity", "()J");
  1932. destroyLocalReference(env, fss);
  1933. if (jthr) {
  1934. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1935. "hdfsGetCapacity: FsStatus#getCapacity");
  1936. return -1;
  1937. }
  1938. return jVal.j;
  1939. }
  1940. tOffset hdfsGetUsed(hdfsFS fs)
  1941. {
  1942. // JAVA EQUIVALENT:
  1943. // FsStatus fss = fs.getStatus();
  1944. // return Fss.getUsed();
  1945. //Get the JNIEnv* corresponding to current thread
  1946. JNIEnv* env = getJNIEnv();
  1947. if (env == NULL) {
  1948. errno = EINTERNAL;
  1949. return -1;
  1950. }
  1951. jobject jFS = (jobject)fs;
  1952. //FileSystem#getStatus
  1953. jvalue jVal;
  1954. jthrowable jthr;
  1955. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  1956. "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;");
  1957. if (jthr) {
  1958. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1959. "hdfsGetUsed: FileSystem#getStatus");
  1960. return -1;
  1961. }
  1962. jobject fss = (jobject)jVal.l;
  1963. jthr = invokeMethod(env, &jVal, INSTANCE, fss, HADOOP_FSSTATUS,
  1964. "getUsed", "()J");
  1965. destroyLocalReference(env, fss);
  1966. if (jthr) {
  1967. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  1968. "hdfsGetUsed: FsStatus#getUsed");
  1969. return -1;
  1970. }
  1971. return jVal.j;
  1972. }
  1973. static jthrowable
  1974. getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
  1975. {
  1976. jvalue jVal;
  1977. jthrowable jthr;
  1978. jobject jPath = NULL;
  1979. jstring jPathName = NULL;
  1980. jstring jUserName = NULL;
  1981. jstring jGroupName = NULL;
  1982. jobject jPermission = NULL;
  1983. jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
  1984. HADOOP_STAT, "isDir", "()Z");
  1985. if (jthr)
  1986. goto done;
  1987. fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;
  1988. jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
  1989. HADOOP_STAT, "getReplication", "()S");
  1990. if (jthr)
  1991. goto done;
  1992. fileInfo->mReplication = jVal.s;
  1993. jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
  1994. HADOOP_STAT, "getBlockSize", "()J");
  1995. if (jthr)
  1996. goto done;
  1997. fileInfo->mBlockSize = jVal.j;
  1998. jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
  1999. HADOOP_STAT, "getModificationTime", "()J");
  2000. if (jthr)
  2001. goto done;
  2002. fileInfo->mLastMod = jVal.j / 1000;
  2003. jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
  2004. HADOOP_STAT, "getAccessTime", "()J");
  2005. if (jthr)
  2006. goto done;
  2007. fileInfo->mLastAccess = (tTime) (jVal.j / 1000);
  2008. if (fileInfo->mKind == kObjectKindFile) {
  2009. jthr = invokeMethod(env, &jVal, INSTANCE, jStat,
  2010. HADOOP_STAT, "getLen", "()J");
  2011. if (jthr)
  2012. goto done;
  2013. fileInfo->mSize = jVal.j;
  2014. }
  2015. jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
  2016. "getPath", "()Lorg/apache/hadoop/fs/Path;");
  2017. if (jthr)
  2018. goto done;
  2019. jPath = jVal.l;
  2020. if (jPath == NULL) {
  2021. jthr = newRuntimeError(env, "org.apache.hadoop.fs.FileStatus#"
  2022. "getPath returned NULL!");
  2023. goto done;
  2024. }
  2025. jthr = invokeMethod(env, &jVal, INSTANCE, jPath, HADOOP_PATH,
  2026. "toString", "()Ljava/lang/String;");
  2027. if (jthr)
  2028. goto done;
  2029. jPathName = jVal.l;
  2030. const char *cPathName =
  2031. (const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL));
  2032. if (!cPathName) {
  2033. jthr = getPendingExceptionAndClear(env);
  2034. goto done;
  2035. }
  2036. fileInfo->mName = strdup(cPathName);
  2037. (*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
  2038. jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
  2039. "getOwner", "()Ljava/lang/String;");
  2040. if (jthr)
  2041. goto done;
  2042. jUserName = jVal.l;
  2043. const char* cUserName =
  2044. (const char*) ((*env)->GetStringUTFChars(env, jUserName, NULL));
  2045. if (!cUserName) {
  2046. jthr = getPendingExceptionAndClear(env);
  2047. goto done;
  2048. }
  2049. fileInfo->mOwner = strdup(cUserName);
  2050. (*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
  2051. const char* cGroupName;
  2052. jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
  2053. "getGroup", "()Ljava/lang/String;");
  2054. if (jthr)
  2055. goto done;
  2056. jGroupName = jVal.l;
  2057. cGroupName = (const char*) ((*env)->GetStringUTFChars(env, jGroupName, NULL));
  2058. if (!cGroupName) {
  2059. jthr = getPendingExceptionAndClear(env);
  2060. goto done;
  2061. }
  2062. fileInfo->mGroup = strdup(cGroupName);
  2063. (*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
  2064. jthr = invokeMethod(env, &jVal, INSTANCE, jStat, HADOOP_STAT,
  2065. "getPermission",
  2066. "()Lorg/apache/hadoop/fs/permission/FsPermission;");
  2067. if (jthr)
  2068. goto done;
  2069. if (jVal.l == NULL) {
  2070. jthr = newRuntimeError(env, "%s#getPermission returned NULL!",
  2071. HADOOP_STAT);
  2072. goto done;
  2073. }
  2074. jPermission = jVal.l;
  2075. jthr = invokeMethod(env, &jVal, INSTANCE, jPermission, HADOOP_FSPERM,
  2076. "toShort", "()S");
  2077. if (jthr)
  2078. goto done;
  2079. fileInfo->mPermissions = jVal.s;
  2080. jthr = NULL;
  2081. done:
  2082. if (jthr)
  2083. hdfsFreeFileInfoEntry(fileInfo);
  2084. destroyLocalReference(env, jPath);
  2085. destroyLocalReference(env, jPathName);
  2086. destroyLocalReference(env, jUserName);
  2087. destroyLocalReference(env, jGroupName);
  2088. destroyLocalReference(env, jPermission);
  2089. destroyLocalReference(env, jPath);
  2090. return jthr;
  2091. }
  2092. static jthrowable
  2093. getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo **fileInfo)
  2094. {
  2095. // JAVA EQUIVALENT:
  2096. // fs.isDirectory(f)
  2097. // fs.getModificationTime()
  2098. // fs.getAccessTime()
  2099. // fs.getLength(f)
  2100. // f.getPath()
  2101. // f.getOwner()
  2102. // f.getGroup()
  2103. // f.getPermission().toShort()
  2104. jobject jStat;
  2105. jvalue jVal;
  2106. jthrowable jthr;
  2107. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_FS,
  2108. "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
  2109. jPath);
  2110. if (jthr)
  2111. return jthr;
  2112. if (jVal.z == 0) {
  2113. *fileInfo = NULL;
  2114. return NULL;
  2115. }
  2116. jthr = invokeMethod(env, &jVal, INSTANCE, jFS,
  2117. HADOOP_FS, "getFileStatus",
  2118. JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)), jPath);
  2119. if (jthr)
  2120. return jthr;
  2121. jStat = jVal.l;
  2122. *fileInfo = calloc(1, sizeof(hdfsFileInfo));
  2123. if (!*fileInfo) {
  2124. destroyLocalReference(env, jStat);
  2125. return newRuntimeError(env, "getFileInfo: OOM allocating hdfsFileInfo");
  2126. }
  2127. jthr = getFileInfoFromStat(env, jStat, *fileInfo);
  2128. destroyLocalReference(env, jStat);
  2129. return jthr;
  2130. }
  2131. hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
  2132. {
  2133. // JAVA EQUIVALENT:
  2134. // Path p(path);
  2135. // Path []pathList = fs.listPaths(p)
  2136. // foreach path in pathList
  2137. // getFileInfo(path)
  2138. jthrowable jthr;
  2139. jobject jPath = NULL;
  2140. hdfsFileInfo *pathList = NULL;
  2141. jobjectArray jPathList = NULL;
  2142. jvalue jVal;
  2143. jsize jPathListSize = 0;
  2144. int ret;
  2145. //Get the JNIEnv* corresponding to current thread
  2146. JNIEnv* env = getJNIEnv();
  2147. if (env == NULL) {
  2148. errno = EINTERNAL;
  2149. return NULL;
  2150. }
  2151. jobject jFS = (jobject)fs;
  2152. //Create an object of org.apache.hadoop.fs.Path
  2153. jthr = constructNewObjectOfPath(env, path, &jPath);
  2154. if (jthr) {
  2155. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  2156. "hdfsListDirectory(%s): constructNewObjectOfPath", path);
  2157. goto done;
  2158. }
  2159. jthr = invokeMethod(env, &jVal, INSTANCE, jFS, HADOOP_DFS, "listStatus",
  2160. JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
  2161. jPath);
  2162. if (jthr) {
  2163. ret = printExceptionAndFree(env, jthr,
  2164. NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
  2165. NOPRINT_EXC_UNRESOLVED_LINK,
  2166. "hdfsListDirectory(%s): FileSystem#listStatus", path);
  2167. goto done;
  2168. }
  2169. jPathList = jVal.l;
  2170. //Figure out the number of entries in that directory
  2171. jPathListSize = (*env)->GetArrayLength(env, jPathList);
  2172. if (jPathListSize == 0) {
  2173. ret = 0;
  2174. goto done;
  2175. }
  2176. //Allocate memory
  2177. pathList = calloc(jPathListSize, sizeof(hdfsFileInfo));
  2178. if (pathList == NULL) {
  2179. ret = ENOMEM;
  2180. goto done;
  2181. }
  2182. //Save path information in pathList
  2183. jsize i;
  2184. jobject tmpStat;
  2185. for (i=0; i < jPathListSize; ++i) {
  2186. tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
  2187. if (!tmpStat) {
  2188. ret = printPendingExceptionAndFree(env, PRINT_EXC_ALL,
  2189. "hdfsListDirectory(%s): GetObjectArrayElement(%d out of %d)",
  2190. path, i, jPathListSize);
  2191. goto done;
  2192. }
  2193. jthr = getFileInfoFromStat(env, tmpStat, &pathList[i]);
  2194. destroyLocalReference(env, tmpStat);
  2195. if (jthr) {
  2196. ret = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  2197. "hdfsListDirectory(%s): getFileInfoFromStat(%d out of %d)",
  2198. path, i, jPathListSize);
  2199. goto done;
  2200. }
  2201. }
  2202. ret = 0;
  2203. done:
  2204. destroyLocalReference(env, jPath);
  2205. destroyLocalReference(env, jPathList);
  2206. if (ret) {
  2207. hdfsFreeFileInfo(pathList, jPathListSize);
  2208. errno = ret;
  2209. return NULL;
  2210. }
  2211. *numEntries = jPathListSize;
  2212. return pathList;
  2213. }
  2214. hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
  2215. {
  2216. // JAVA EQUIVALENT:
  2217. // File f(path);
  2218. // fs.isDirectory(f)
  2219. // fs.lastModified() ??
  2220. // fs.getLength(f)
  2221. // f.getPath()
  2222. //Get the JNIEnv* corresponding to current thread
  2223. JNIEnv* env = getJNIEnv();
  2224. if (env == NULL) {
  2225. errno = EINTERNAL;
  2226. return NULL;
  2227. }
  2228. jobject jFS = (jobject)fs;
  2229. //Create an object of org.apache.hadoop.fs.Path
  2230. jobject jPath;
  2231. jthrowable jthr = constructNewObjectOfPath(env, path, &jPath);
  2232. if (jthr) {
  2233. errno = printExceptionAndFree(env, jthr, PRINT_EXC_ALL,
  2234. "hdfsGetPathInfo(%s): constructNewObjectOfPath", path);
  2235. return NULL;
  2236. }
  2237. hdfsFileInfo *fileInfo;
  2238. jthr = getFileInfo(env, jFS, jPath, &fileInfo);
  2239. destroyLocalReference(env, jPath);
  2240. if (jthr) {
  2241. errno = printExceptionAndFree(env, jthr,
  2242. NOPRINT_EXC_ACCESS_CONTROL | NOPRINT_EXC_FILE_NOT_FOUND |
  2243. NOPRINT_EXC_UNRESOLVED_LINK,
  2244. "hdfsGetPathInfo(%s): getFileInfo", path);
  2245. return NULL;
  2246. }
  2247. if (!fileInfo) {
  2248. errno = ENOENT;
  2249. return NULL;
  2250. }
  2251. return fileInfo;
  2252. }
  2253. static void hdfsFreeFileInfoEntry(hdfsFileInfo *hdfsFileInfo)
  2254. {
  2255. free(hdfsFileInfo->mName);
  2256. free(hdfsFileInfo->mOwner);
  2257. free(hdfsFileInfo->mGroup);
  2258. memset(hdfsFileInfo, 0, sizeof(hdfsFileInfo));
  2259. }
  2260. void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
  2261. {
  2262. //Free the mName, mOwner, and mGroup
  2263. int i;
  2264. for (i=0; i < numEntries; ++i) {
  2265. hdfsFreeFileInfoEntry(hdfsFileInfo + i);
  2266. }
  2267. //Free entire block
  2268. free(hdfsFileInfo);
  2269. }
  2270. /**
  2271. * vim: ts=4: sw=4: et:
  2272. */