  1. /**
  2. * Licensed to the Apache Software Foundation (ASF) under one
  3. * or more contributor license agreements. See the NOTICE file
  4. * distributed with this work for additional information
  5. * regarding copyright ownership. The ASF licenses this file
  6. * to you under the Apache License, Version 2.0 (the
  7. * "License"); you may not use this file except in compliance
  8. * with the License. You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
  18. #include "hdfs.h"
  19. #include "hdfsJniHelper.h"
  20. /* Some frequently used Java paths */
  21. #define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
  22. #define HADOOP_PATH "org/apache/hadoop/fs/Path"
  23. #define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
  24. #define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
  25. #define HADOOP_FSSTATUS "org/apache/hadoop/fs/FsStatus"
  26. #define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
  27. #define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
  28. #define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
  29. #define HADOOP_OSTRM "org/apache/hadoop/fs/FSDataOutputStream"
  30. #define HADOOP_STAT "org/apache/hadoop/fs/FileStatus"
  31. #define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
  32. #define JAVA_NET_ISA "java/net/InetSocketAddress"
  33. #define JAVA_NET_URI "java/net/URI"
  34. #define JAVA_STRING "java/lang/String"
  35. #define JAVA_VOID "V"
  36. /* Macros for constructing method signatures */
  37. #define JPARAM(X) "L" X ";"
  38. #define JARRPARAM(X) "[L" X ";"
  39. #define JMETHOD1(X, R) "(" X ")" R
  40. #define JMETHOD2(X, Y, R) "(" X Y ")" R
  41. #define JMETHOD3(X, Y, Z, R) "(" X Y Z")" R
/**
 * hdfsJniEnv: A wrapper struct to be used as 'value'
 * while saving thread -> JNIEnv* mappings
 */
typedef struct
{
    JNIEnv* env;   /* JNI environment pointer cached for one thread */
} hdfsJniEnv;
  50. /**
  51. * Helper function to destroy a local reference of java.lang.Object
  52. * @param env: The JNIEnv pointer.
  53. * @param jFile: The local reference of java.lang.Object object
  54. * @return None.
  55. */
  56. static void destroyLocalReference(JNIEnv *env, jobject jObject)
  57. {
  58. if (jObject)
  59. (*env)->DeleteLocalRef(env, jObject);
  60. }
  61. /**
  62. * Helper function to create a org.apache.hadoop.fs.Path object.
  63. * @param env: The JNIEnv pointer.
  64. * @param path: The file-path for which to construct org.apache.hadoop.fs.Path
  65. * object.
  66. * @return Returns a jobject on success and NULL on error.
  67. */
  68. static jobject constructNewObjectOfPath(JNIEnv *env, const char *path)
  69. {
  70. //Construct a java.lang.String object
  71. jstring jPathString = (*env)->NewStringUTF(env, path);
  72. //Construct the org.apache.hadoop.fs.Path object
  73. jobject jPath =
  74. constructNewObjectOfClass(env, NULL, "org/apache/hadoop/fs/Path",
  75. "(Ljava/lang/String;)V", jPathString);
  76. if (jPath == NULL) {
  77. fprintf(stderr, "Can't construct instance of class "
  78. "org.apache.hadoop.fs.Path for %s\n", path);
  79. errno = EINTERNAL;
  80. return NULL;
  81. }
  82. // Destroy the local reference to the java.lang.String object
  83. destroyLocalReference(env, jPathString);
  84. return jPath;
  85. }
/**
 * Helper function to translate an exception into a meaningful errno value.
 * @param exc: The exception (may be NULL if only a generic failure occurred).
 * @param env: The JNIEnv Pointer.
 * @param method: The name of the method that threw the exception. This
 * may be a format string to be used in conjunction with additional arguments.
 * @return Returns a meaningful errno value if possible, or EINTERNAL if not.
 */
static int errnoFromException(jthrowable exc, JNIEnv *env,
                              const char *method, ...)
{
    va_list ap;
    int errnum = 0;
    char *excClass = NULL;

    // No exception object supplied: go straight to the generic error path.
    if (exc == NULL)
        goto default_error;

    // Resolve the exception's fully-qualified Java class name so we can
    // map well-known exception classes onto POSIX errno values.
    if ((excClass = classNameOfObject((jobject) exc, env)) == NULL) {
        errnum = EINTERNAL;
        goto done;
    }

    if (!strcmp(excClass, "org.apache.hadoop.security."
                "AccessControlException")) {
        errnum = EACCES;    // permission denied
        goto done;
    }

    if (!strcmp(excClass, "org.apache.hadoop.hdfs.protocol."
                "QuotaExceededException")) {
        errnum = EDQUOT;    // disk quota exceeded
        goto done;
    }

    if (!strcmp(excClass, "java.io.FileNotFoundException")) {
        errnum = ENOENT;    // no such file or directory
        goto done;
    }

    //TODO: interpret more exceptions; maybe examine exc.getMessage()

default_error:
    //Can't tell what went wrong, so just punt
    (*env)->ExceptionDescribe(env);
    fprintf(stderr, "Call to ");
    va_start(ap, method);
    vfprintf(stderr, method, ap);
    va_end(ap);
    fprintf(stderr, " failed!\n");
    errnum = EINTERNAL;

done:
    // Always clear any pending Java exception before returning to C code,
    // otherwise subsequent JNI calls would misbehave.
    (*env)->ExceptionClear(env);

    if (excClass != NULL)
        free(excClass);

    return errnum;
}
/**
 * Connect to an HDFS (or local) filesystem as the current user.
 * Thin wrapper over hdfsConnectAsUser with a NULL user name.
 * @param host: NULL for local FS, "default" (with port 0) for the configured
 * default URI, otherwise a namenode host name.
 * @param port: The namenode port.
 * @return A filesystem handle, or NULL on error.
 */
hdfsFS hdfsConnect(const char* host, tPort port) {
    // connect with NULL as user name
    return hdfsConnectAsUser(host, port, NULL);
}
/**
 * Always return a new FileSystem handle (bypasses the FileSystem cache).
 * Thin wrapper over hdfsConnectAsUserNewInstance with a NULL user name.
 * @param host: NULL for local FS, "default" (with port 0) for the configured
 * default URI, otherwise a namenode host name.
 * @param port: The namenode port.
 * @return A filesystem handle, or NULL on error.
 */
hdfsFS hdfsConnectNewInstance(const char* host, tPort port) {
    // connect with NULL as user name/groups
    return hdfsConnectAsUserNewInstance(host, port, NULL);
}
  145. hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user)
  146. {
  147. // JAVA EQUIVALENT:
  148. // FileSystem fs = FileSystem.get(new Configuration());
  149. // return fs;
  150. JNIEnv *env = 0;
  151. jobject jConfiguration = NULL;
  152. jobject jFS = NULL;
  153. jobject jURI = NULL;
  154. jstring jURIString = NULL;
  155. jvalue jVal;
  156. jthrowable jExc = NULL;
  157. char *cURI = 0;
  158. jobject gFsRef = NULL;
  159. jstring jUserString = NULL;
  160. //Get the JNIEnv* corresponding to current thread
  161. env = getJNIEnv();
  162. if (env == NULL) {
  163. errno = EINTERNAL;
  164. return NULL;
  165. }
  166. //Create the org.apache.hadoop.conf.Configuration object
  167. jConfiguration =
  168. constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
  169. if (jConfiguration == NULL) {
  170. fprintf(stderr, "Can't construct instance of class "
  171. "org.apache.hadoop.conf.Configuration\n");
  172. errno = EINTERNAL;
  173. return NULL;
  174. }
  175. if (user != NULL) {
  176. jUserString = (*env)->NewStringUTF(env, user);
  177. }
  178. //Check what type of FileSystem the caller wants...
  179. if (host == NULL) {
  180. // fs = FileSytem::getLocal(conf);
  181. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "getLocal",
  182. JMETHOD1(JPARAM(HADOOP_CONF),
  183. JPARAM(HADOOP_LOCALFS)),
  184. jConfiguration) != 0) {
  185. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  186. "FileSystem::getLocal");
  187. goto done;
  188. }
  189. jFS = jVal.l;
  190. }
  191. //FileSystem.get(conf) -> FileSystem.get(FileSystem.getDefaultUri(conf),
  192. // conf, user)
  193. else if (!strcmp(host, "default") && port == 0) {
  194. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS,
  195. "getDefaultUri",
  196. "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
  197. jConfiguration) != 0) {
  198. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs.",
  199. "FileSystem::getDefaultUri");
  200. goto done;
  201. }
  202. jURI = jVal.l;
  203. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "get",
  204. JMETHOD3(JPARAM(JAVA_NET_URI),
  205. JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
  206. JPARAM(HADOOP_FS)),
  207. jURI, jConfiguration, jUserString) != 0) {
  208. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  209. "Filesystem::get(URI, Configuration)");
  210. goto done;
  211. }
  212. jFS = jVal.l;
  213. }
  214. else {
  215. // fs = FileSystem::get(URI, conf, ugi);
  216. cURI = malloc(strlen(host)+16);
  217. sprintf(cURI, "hdfs://%s:%d", host, (int)(port));
  218. if (cURI == NULL) {
  219. fprintf (stderr, "Couldn't allocate an object of size %d",
  220. (int)(strlen(host) + 16));
  221. errno = EINTERNAL;
  222. goto done;
  223. }
  224. jURIString = (*env)->NewStringUTF(env, cURI);
  225. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, JAVA_NET_URI,
  226. "create", "(Ljava/lang/String;)Ljava/net/URI;",
  227. jURIString) != 0) {
  228. errno = errnoFromException(jExc, env, "java.net.URI::create");
  229. goto done;
  230. }
  231. jURI = jVal.l;
  232. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "get",
  233. JMETHOD3(JPARAM(JAVA_NET_URI),
  234. JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
  235. JPARAM(HADOOP_FS)),
  236. jURI, jConfiguration, jUserString) != 0) {
  237. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  238. "Filesystem::get(URI, Configuration)");
  239. goto done;
  240. }
  241. jFS = jVal.l;
  242. }
  243. done:
  244. // Release unnecessary local references
  245. destroyLocalReference(env, jConfiguration);
  246. destroyLocalReference(env, jURIString);
  247. destroyLocalReference(env, jURI);
  248. destroyLocalReference(env, jUserString);
  249. if (cURI) free(cURI);
  250. /* Create a global reference for this fs */
  251. if (jFS) {
  252. gFsRef = (*env)->NewGlobalRef(env, jFS);
  253. destroyLocalReference(env, jFS);
  254. }
  255. return gFsRef;
  256. }
  257. /** Always return a new FileSystem handle */
  258. hdfsFS hdfsConnectAsUserNewInstance(const char* host, tPort port, const char *user)
  259. {
  260. // JAVA EQUIVALENT:
  261. // FileSystem fs = FileSystem.get(new Configuration());
  262. // return fs;
  263. JNIEnv *env = 0;
  264. jobject jConfiguration = NULL;
  265. jobject jFS = NULL;
  266. jobject jURI = NULL;
  267. jstring jURIString = NULL;
  268. jvalue jVal;
  269. jthrowable jExc = NULL;
  270. char *cURI = 0;
  271. jobject gFsRef = NULL;
  272. jstring jUserString = NULL;
  273. //Get the JNIEnv* corresponding to current thread
  274. env = getJNIEnv();
  275. if (env == NULL) {
  276. errno = EINTERNAL;
  277. return NULL;
  278. }
  279. //Create the org.apache.hadoop.conf.Configuration object
  280. jConfiguration =
  281. constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
  282. if (jConfiguration == NULL) {
  283. fprintf(stderr, "Can't construct instance of class "
  284. "org.apache.hadoop.conf.Configuration\n");
  285. errno = EINTERNAL;
  286. return NULL;
  287. }
  288. if (user != NULL) {
  289. jUserString = (*env)->NewStringUTF(env, user);
  290. }
  291. //Check what type of FileSystem the caller wants...
  292. if (host == NULL) {
  293. // fs = FileSytem::newInstanceLocal(conf);
  294. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "newInstanceLocal",
  295. JMETHOD1(JPARAM(HADOOP_CONF),
  296. JPARAM(HADOOP_LOCALFS)),
  297. jConfiguration) != 0) {
  298. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  299. "FileSystem::newInstanceLocal");
  300. goto done;
  301. }
  302. jFS = jVal.l;
  303. }
  304. else if (!strcmp(host, "default") && port == 0) {
  305. //fs = FileSystem::get(conf);
  306. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS,
  307. "getDefaultUri",
  308. "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
  309. jConfiguration) != 0) {
  310. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs.",
  311. "FileSystem::getDefaultUri");
  312. goto done;
  313. }
  314. jURI = jVal.l;
  315. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
  316. HADOOP_FS, "newInstance",
  317. JMETHOD3(JPARAM(JAVA_NET_URI),
  318. JPARAM(HADOOP_CONF),
  319. JPARAM(JAVA_STRING),
  320. JPARAM(HADOOP_FS)),
  321. jURI, jConfiguration, jUserString) != 0) {
  322. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  323. "FileSystem::newInstance");
  324. goto done;
  325. }
  326. jFS = jVal.l;
  327. }
  328. else {
  329. // fs = FileSystem::newInstance(URI, conf);
  330. cURI = malloc(strlen(host)+16);
  331. sprintf(cURI, "hdfs://%s:%d", host, (int)(port));
  332. jURIString = (*env)->NewStringUTF(env, cURI);
  333. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, JAVA_NET_URI,
  334. "create", "(Ljava/lang/String;)Ljava/net/URI;",
  335. jURIString) != 0) {
  336. errno = errnoFromException(jExc, env, "java.net.URI::create");
  337. goto done;
  338. }
  339. jURI = jVal.l;
  340. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "newInstance",
  341. JMETHOD3(JPARAM(JAVA_NET_URI),
  342. JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
  343. JPARAM(HADOOP_FS)),
  344. jURI, jConfiguration, jUserString) != 0) {
  345. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  346. "Filesystem::newInstance(URI, Configuration)");
  347. goto done;
  348. }
  349. jFS = jVal.l;
  350. }
  351. done:
  352. // Release unnecessary local references
  353. destroyLocalReference(env, jConfiguration);
  354. destroyLocalReference(env, jURIString);
  355. destroyLocalReference(env, jURI);
  356. destroyLocalReference(env, jUserString);
  357. if (cURI) free(cURI);
  358. /* Create a global reference for this fs */
  359. if (jFS) {
  360. gFsRef = (*env)->NewGlobalRef(env, jFS);
  361. destroyLocalReference(env, jFS);
  362. }
  363. return gFsRef;
  364. }
  365. int hdfsDisconnect(hdfsFS fs)
  366. {
  367. // JAVA EQUIVALENT:
  368. // fs.close()
  369. //Get the JNIEnv* corresponding to current thread
  370. JNIEnv* env = getJNIEnv();
  371. if (env == NULL) {
  372. errno = EINTERNAL;
  373. return -2;
  374. }
  375. //Parameters
  376. jobject jFS = (jobject)fs;
  377. //Caught exception
  378. jthrowable jExc = NULL;
  379. //Sanity check
  380. if (fs == NULL) {
  381. errno = EBADF;
  382. return -1;
  383. }
  384. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  385. "close", "()V") != 0) {
  386. errno = errnoFromException(jExc, env, "Filesystem::close");
  387. return -1;
  388. }
  389. //Release unnecessary references
  390. (*env)->DeleteGlobalRef(env, fs);
  391. return 0;
  392. }
/**
 * Open an HDFS file for reading (O_RDONLY), writing (O_WRONLY) or appending
 * (O_WRONLY|O_APPEND).  O_RDWR is not supported.
 * @param fs: The filesystem handle.
 * @param path: The file path.
 * @param flags: POSIX-style open flags.
 * @param bufferSize: I/O buffer size; 0 means use the configured default.
 * @param replication: Block replication; 0 means use the configured default.
 * @param blockSize: Block size; 0 means use the configured default.
 * @return An hdfsFile handle, or NULL on error (errno set).
 */
hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                      int bufferSize, short replication, tSize blockSize)
{
    /*
      JAVA EQUIVALENT:
       File f = new File(path);
       FSData{Input|Output}Stream f{is|os} = fs.create(f);
       return f{is|os};
    */
    /* Get the JNIEnv* corresponding to current thread */
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
        errno = EINTERNAL;
        return NULL;
    }

    jobject jFS = (jobject)fs;

    // Read-write streams do not exist in the HDFS model.
    if (flags & O_RDWR) {
        fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
        errno = ENOTSUP;
        return NULL;
    }

    if ((flags & O_CREAT) && (flags & O_EXCL)) {
        fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
    }

    /* The hadoop java api/signature: pick open/append/create and the
       matching JNI method signature based on the flags. */
    const char* method = ((flags & O_WRONLY) == 0) ? "open" : (flags & O_APPEND) ? "append" : "create";
    const char* signature = ((flags & O_WRONLY) == 0) ?
        JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM)) :
        (flags & O_APPEND) ?
        JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM)) :
        JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));

    /* Return value */
    hdfsFile file = NULL;

    /* Create an object of org.apache.hadoop.fs.Path */
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    /* Get the Configuration object from the FileSystem object */
    jvalue jVal;
    jobject jConfiguration = NULL;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "getConf", JMETHOD1("", JPARAM(HADOOP_CONF))) != 0) {
        errno = errnoFromException(jExc, env, "get configuration object "
                                   "from filesystem");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jConfiguration = jVal.l;

    // Effective parameters: any zero argument falls back to the value
    // configured in the Hadoop Configuration (looked up below).
    jint jBufferSize = bufferSize;
    jshort jReplication = replication;
    jlong jBlockSize = blockSize;
    jstring jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
    jstring jStrReplication = (*env)->NewStringUTF(env, "dfs.replication");
    jstring jStrBlockSize = (*env)->NewStringUTF(env, "dfs.block.size");

    //bufferSize
    if (!bufferSize) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                         HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
                         jStrBufferSize, 4096) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "Configuration::getInt");
            goto done;
        }
        jBufferSize = jVal.i;
    }

    // Replication and block size only matter for create (write, not append).
    if ((flags & O_WRONLY) && (flags & O_APPEND) == 0) {
        //replication
        if (!replication) {
            if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                             HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
                             jStrReplication, 1) != 0) {
                errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                           "Configuration::getInt");
                goto done;
            }
            jReplication = jVal.i;
        }

        //blockSize
        if (!blockSize) {
            if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                             HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J",
                             jStrBlockSize, (jlong)67108864)) {
                errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                           "FileSystem::%s(%s)", method,
                                           signature);
                goto done;
            }
            jBlockSize = jVal.j;
        }
    }

    /* Create and return either the FSDataInputStream or
       FSDataOutputStream references jobject jStream */

    // READ?
    if ((flags & O_WRONLY) == 0) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath, jBufferSize)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    } else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
        // WRITE/APPEND?
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    } else {
        // WRITE/CREATE
        jboolean jOverWrite = 1;   // create() always overwrites here
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath, jOverWrite,
                         jBufferSize, jReplication, jBlockSize)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    }

    // Wrap the Java stream in an hdfsFile handle; the stream is promoted to
    // a global reference so it outlives this JNI frame.
    file = malloc(sizeof(struct hdfsFile_internal));
    if (!file) {
        errno = ENOMEM;
    } else {
        file->file = (*env)->NewGlobalRef(env, jVal.l);
        file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);
        destroyLocalReference(env, jVal.l);
    }

done:
    //Delete unnecessary local references
    destroyLocalReference(env, jStrBufferSize);
    destroyLocalReference(env, jStrReplication);
    destroyLocalReference(env, jStrBlockSize);
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jPath);

    return file;
}
  534. int hdfsCloseFile(hdfsFS fs, hdfsFile file)
  535. {
  536. // JAVA EQUIVALENT:
  537. // file.close
  538. //Get the JNIEnv* corresponding to current thread
  539. JNIEnv* env = getJNIEnv();
  540. if (env == NULL) {
  541. errno = EINTERNAL;
  542. return -2;
  543. }
  544. //Parameters
  545. jobject jStream = (jobject)(file ? file->file : NULL);
  546. //Caught exception
  547. jthrowable jExc = NULL;
  548. //Sanity check
  549. if (!file || file->type == UNINITIALIZED) {
  550. errno = EBADF;
  551. return -1;
  552. }
  553. //The interface whose 'close' method to be called
  554. const char* interface = (file->type == INPUT) ?
  555. HADOOP_ISTRM : HADOOP_OSTRM;
  556. if (invokeMethod(env, NULL, &jExc, INSTANCE, jStream, interface,
  557. "close", "()V") != 0) {
  558. errno = errnoFromException(jExc, env, "%s::close", interface);
  559. return -1;
  560. }
  561. //De-allocate memory
  562. free(file);
  563. (*env)->DeleteGlobalRef(env, jStream);
  564. return 0;
  565. }
  566. int hdfsExists(hdfsFS fs, const char *path)
  567. {
  568. JNIEnv *env = getJNIEnv();
  569. if (env == NULL) {
  570. errno = EINTERNAL;
  571. return -2;
  572. }
  573. jobject jPath = constructNewObjectOfPath(env, path);
  574. jvalue jVal;
  575. jthrowable jExc = NULL;
  576. jobject jFS = (jobject)fs;
  577. if (jPath == NULL) {
  578. return -1;
  579. }
  580. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  581. "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
  582. jPath) != 0) {
  583. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  584. "FileSystem::exists");
  585. destroyLocalReference(env, jPath);
  586. return -1;
  587. }
  588. destroyLocalReference(env, jPath);
  589. return jVal.z ? 0 : -1;
  590. }
  591. tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
  592. {
  593. // JAVA EQUIVALENT:
  594. // byte [] bR = new byte[length];
  595. // fis.read(bR);
  596. //Get the JNIEnv* corresponding to current thread
  597. JNIEnv* env = getJNIEnv();
  598. if (env == NULL) {
  599. errno = EINTERNAL;
  600. return -1;
  601. }
  602. //Parameters
  603. jobject jInputStream = (jobject)(f ? f->file : NULL);
  604. jbyteArray jbRarray;
  605. jint noReadBytes = 0;
  606. jvalue jVal;
  607. jthrowable jExc = NULL;
  608. //Sanity check
  609. if (!f || f->type == UNINITIALIZED) {
  610. errno = EBADF;
  611. return -1;
  612. }
  613. //Error checking... make sure that this file is 'readable'
  614. if (f->type != INPUT) {
  615. fprintf(stderr, "Cannot read from a non-InputStream object!\n");
  616. errno = EINVAL;
  617. return -1;
  618. }
  619. //Read the requisite bytes
  620. jbRarray = (*env)->NewByteArray(env, length);
  621. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  622. "read", "([B)I", jbRarray) != 0) {
  623. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  624. "FSDataInputStream::read");
  625. noReadBytes = -1;
  626. }
  627. else {
  628. noReadBytes = jVal.i;
  629. if (noReadBytes > 0) {
  630. (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
  631. } else {
  632. //This is a valid case: there aren't any bytes left to read!
  633. if (noReadBytes == 0 || noReadBytes < -1) {
  634. fprintf(stderr, "WARN: FSDataInputStream.read returned invalid return code - libhdfs returning EOF, i.e., 0: %d\n", noReadBytes);
  635. }
  636. noReadBytes = 0;
  637. }
  638. errno = 0;
  639. }
  640. destroyLocalReference(env, jbRarray);
  641. return noReadBytes;
  642. }
  643. tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
  644. void* buffer, tSize length)
  645. {
  646. // JAVA EQUIVALENT:
  647. // byte [] bR = new byte[length];
  648. // fis.read(pos, bR, 0, length);
  649. //Get the JNIEnv* corresponding to current thread
  650. JNIEnv* env = getJNIEnv();
  651. if (env == NULL) {
  652. errno = EINTERNAL;
  653. return -1;
  654. }
  655. //Parameters
  656. jobject jInputStream = (jobject)(f ? f->file : NULL);
  657. jbyteArray jbRarray;
  658. jint noReadBytes = 0;
  659. jvalue jVal;
  660. jthrowable jExc = NULL;
  661. //Sanity check
  662. if (!f || f->type == UNINITIALIZED) {
  663. errno = EBADF;
  664. return -1;
  665. }
  666. //Error checking... make sure that this file is 'readable'
  667. if (f->type != INPUT) {
  668. fprintf(stderr, "Cannot read from a non-InputStream object!\n");
  669. errno = EINVAL;
  670. return -1;
  671. }
  672. //Read the requisite bytes
  673. jbRarray = (*env)->NewByteArray(env, length);
  674. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  675. "read", "(J[BII)I", position, jbRarray, 0, length) != 0) {
  676. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  677. "FSDataInputStream::read");
  678. noReadBytes = -1;
  679. }
  680. else {
  681. noReadBytes = jVal.i;
  682. if (noReadBytes > 0) {
  683. (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
  684. } else {
  685. //This is a valid case: there aren't any bytes left to read!
  686. if (noReadBytes == 0 || noReadBytes < -1) {
  687. fprintf(stderr, "WARN: FSDataInputStream.read returned invalid return code - libhdfs returning EOF, i.e., 0: %d\n", noReadBytes);
  688. }
  689. noReadBytes = 0;
  690. }
  691. errno = 0;
  692. }
  693. destroyLocalReference(env, jbRarray);
  694. return noReadBytes;
  695. }
  696. tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
  697. {
  698. // JAVA EQUIVALENT
  699. // byte b[] = str.getBytes();
  700. // fso.write(b);
  701. //Get the JNIEnv* corresponding to current thread
  702. JNIEnv* env = getJNIEnv();
  703. if (env == NULL) {
  704. errno = EINTERNAL;
  705. return -1;
  706. }
  707. //Parameters
  708. jobject jOutputStream = (jobject)(f ? f->file : 0);
  709. jbyteArray jbWarray;
  710. //Caught exception
  711. jthrowable jExc = NULL;
  712. //Sanity check
  713. if (!f || f->type == UNINITIALIZED) {
  714. errno = EBADF;
  715. return -1;
  716. }
  717. if (length < 0) {
  718. errno = EINVAL;
  719. return -1;
  720. }
  721. //Error checking... make sure that this file is 'writable'
  722. if (f->type != OUTPUT) {
  723. fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
  724. errno = EINVAL;
  725. return -1;
  726. }
  727. // 'length' equals 'zero' is a valid use-case according to Posix!
  728. if (length != 0) {
  729. //Write the requisite bytes into the file
  730. jbWarray = (*env)->NewByteArray(env, length);
  731. (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
  732. if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
  733. HADOOP_OSTRM, "write",
  734. "([B)V", jbWarray) != 0) {
  735. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  736. "FSDataOutputStream::write");
  737. length = -1;
  738. }
  739. destroyLocalReference(env, jbWarray);
  740. }
  741. //Return no. of bytes succesfully written (libc way)
  742. //i.e. 'length' itself! ;-)
  743. return length;
  744. }
  745. int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
  746. {
  747. // JAVA EQUIVALENT
  748. // fis.seek(pos);
  749. //Get the JNIEnv* corresponding to current thread
  750. JNIEnv* env = getJNIEnv();
  751. if (env == NULL) {
  752. errno = EINTERNAL;
  753. return -1;
  754. }
  755. //Parameters
  756. jobject jInputStream = (jobject)(f ? f->file : 0);
  757. //Caught exception
  758. jthrowable jExc = NULL;
  759. //Sanity check
  760. if (!f || f->type != INPUT) {
  761. errno = EBADF;
  762. return -1;
  763. }
  764. if (invokeMethod(env, NULL, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  765. "seek", "(J)V", desiredPos) != 0) {
  766. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  767. "FSDataInputStream::seek");
  768. return -1;
  769. }
  770. return 0;
  771. }
  772. tOffset hdfsTell(hdfsFS fs, hdfsFile f)
  773. {
  774. // JAVA EQUIVALENT
  775. // pos = f.getPos();
  776. //Get the JNIEnv* corresponding to current thread
  777. JNIEnv* env = getJNIEnv();
  778. if (env == NULL) {
  779. errno = EINTERNAL;
  780. return -1;
  781. }
  782. //Parameters
  783. jobject jStream = (jobject)(f ? f->file : 0);
  784. //Sanity check
  785. if (!f || f->type == UNINITIALIZED) {
  786. errno = EBADF;
  787. return -1;
  788. }
  789. const char* interface = (f->type == INPUT) ?
  790. HADOOP_ISTRM : HADOOP_OSTRM;
  791. jlong currentPos = -1;
  792. jvalue jVal;
  793. jthrowable jExc = NULL;
  794. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStream,
  795. interface, "getPos", "()J") != 0) {
  796. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  797. "FSDataInputStream::getPos");
  798. return -1;
  799. }
  800. currentPos = jVal.j;
  801. return (tOffset)currentPos;
  802. }
  803. int hdfsFlush(hdfsFS fs, hdfsFile f)
  804. {
  805. // JAVA EQUIVALENT
  806. // fos.flush();
  807. //Get the JNIEnv* corresponding to current thread
  808. JNIEnv* env = getJNIEnv();
  809. if (env == NULL) {
  810. errno = EINTERNAL;
  811. return -1;
  812. }
  813. //Parameters
  814. jobject jOutputStream = (jobject)(f ? f->file : 0);
  815. //Caught exception
  816. jthrowable jExc = NULL;
  817. //Sanity check
  818. if (!f || f->type != OUTPUT) {
  819. errno = EBADF;
  820. return -1;
  821. }
  822. if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
  823. HADOOP_OSTRM, "flush", "()V") != 0) {
  824. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  825. "FSDataInputStream::flush");
  826. return -1;
  827. }
  828. return 0;
  829. }
  830. int hdfsHFlush(hdfsFS fs, hdfsFile f)
  831. {
  832. //Get the JNIEnv* corresponding to current thread
  833. JNIEnv* env = getJNIEnv();
  834. if (env == NULL) {
  835. errno = EINTERNAL;
  836. return -1;
  837. }
  838. //Parameters
  839. jobject jOutputStream = (jobject)(f ? f->file : 0);
  840. //Caught exception
  841. jthrowable jExc = NULL;
  842. //Sanity check
  843. if (!f || f->type != OUTPUT) {
  844. errno = EBADF;
  845. return -1;
  846. }
  847. if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
  848. HADOOP_OSTRM, "hflush", "()V") != 0) {
  849. errno = errnoFromException(jExc, env, HADOOP_OSTRM "::hflush");
  850. return -1;
  851. }
  852. return 0;
  853. }
  854. int hdfsAvailable(hdfsFS fs, hdfsFile f)
  855. {
  856. // JAVA EQUIVALENT
  857. // fis.available();
  858. //Get the JNIEnv* corresponding to current thread
  859. JNIEnv* env = getJNIEnv();
  860. if (env == NULL) {
  861. errno = EINTERNAL;
  862. return -1;
  863. }
  864. //Parameters
  865. jobject jInputStream = (jobject)(f ? f->file : 0);
  866. //Caught exception
  867. jthrowable jExc = NULL;
  868. //Sanity check
  869. if (!f || f->type != INPUT) {
  870. errno = EBADF;
  871. return -1;
  872. }
  873. jint available = -1;
  874. jvalue jVal;
  875. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream,
  876. HADOOP_ISTRM, "available", "()I") != 0) {
  877. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  878. "FSDataInputStream::available");
  879. return -1;
  880. }
  881. available = jVal.i;
  882. return available;
  883. }
/**
 * Copy a file from one filesystem to another (source is retained).
 * Wraps the static Java method FileUtil.copy(srcFS, srcPath, dstFS,
 * dstPath, deleteSource=false, conf).
 *
 * Returns 0 on success, -1 on error (errno set).
 */
int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
    //                 deleteSource = false, conf)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
      errno = EINTERNAL;
      return -1;
    }

    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;

    // Build Path objects for both endpoints; each local reference created
    // here must be released on every exit path below.
    jobject jSrcPath = NULL;
    jobject jDstPath = NULL;

    jSrcPath = constructNewObjectOfPath(env, src);
    if (jSrcPath == NULL) {
      return -1;
    }

    jDstPath = constructNewObjectOfPath(env, dst);
    if (jDstPath == NULL) {
      destroyLocalReference(env, jSrcPath);
      return -1;
    }

    int retval = 0;

    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration =
      constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
      fprintf(stderr, "Can't construct instance of class "
              "org.apache.hadoop.conf.Configuration\n");
      errno = EINTERNAL;
      destroyLocalReference(env, jSrcPath);
      destroyLocalReference(env, jDstPath);
      return -1;
    }

    //FileUtil::copy
    jboolean deleteSource = 0; //Only copy
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, STATIC,
                     NULL, "org/apache/hadoop/fs/FileUtil", "copy",
                     "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
                     jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
                     jConfiguration) != 0) {
      errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                 "FileUtil::copy");
      retval = -1;
      goto done;
    }

    // NOTE(review): the boolean result in jVal.z is ignored here — a
    // false return from FileUtil.copy is reported as success.
done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);

    return retval;
}
/**
 * Move a file between filesystems: identical to hdfsCopy but with
 * deleteSource=true, so the source is removed after a successful copy.
 *
 * Returns 0 on success, -1 on error (errno set).
 */
int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
{
    //JAVA EQUIVALENT
    //  FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
    //                 deleteSource = true, conf)

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
      errno = EINTERNAL;
      return -1;
    }

    //Parameters
    jobject jSrcFS = (jobject)srcFS;
    jobject jDstFS = (jobject)dstFS;

    // Build Path objects for both endpoints; each local reference created
    // here must be released on every exit path below.
    jobject jSrcPath = NULL;
    jobject jDstPath = NULL;

    jSrcPath = constructNewObjectOfPath(env, src);
    if (jSrcPath == NULL) {
      return -1;
    }

    jDstPath = constructNewObjectOfPath(env, dst);
    if (jDstPath == NULL) {
      destroyLocalReference(env, jSrcPath);
      return -1;
    }

    int retval = 0;

    //Create the org.apache.hadoop.conf.Configuration object
    jobject jConfiguration =
      constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
    if (jConfiguration == NULL) {
      fprintf(stderr, "Can't construct instance of class "
              "org.apache.hadoop.conf.Configuration\n");
      errno = EINTERNAL;
      destroyLocalReference(env, jSrcPath);
      destroyLocalReference(env, jDstPath);
      return -1;
    }

    //FileUtil::copy
    jboolean deleteSource = 1; //Delete src after copy
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
                     "org/apache/hadoop/fs/FileUtil", "copy",
                     "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
                     jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
                     jConfiguration) != 0) {
      errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                 "FileUtil::copy(move)");
      retval = -1;
      goto done;
    }

    // NOTE(review): as in hdfsCopy, a false boolean result from
    // FileUtil.copy is ignored and reported as success.
done:
    //Delete unnecessary local references
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jSrcPath);
    destroyLocalReference(env, jDstPath);

    return retval;
}
  1000. int hdfsDelete(hdfsFS fs, const char* path, int recursive)
  1001. {
  1002. // JAVA EQUIVALENT:
  1003. // File f = new File(path);
  1004. // bool retval = fs.delete(f);
  1005. //Get the JNIEnv* corresponding to current thread
  1006. JNIEnv* env = getJNIEnv();
  1007. if (env == NULL) {
  1008. errno = EINTERNAL;
  1009. return -1;
  1010. }
  1011. jobject jFS = (jobject)fs;
  1012. //Create an object of java.io.File
  1013. jobject jPath = constructNewObjectOfPath(env, path);
  1014. if (jPath == NULL) {
  1015. return -1;
  1016. }
  1017. //Delete the file
  1018. jvalue jVal;
  1019. jthrowable jExc = NULL;
  1020. jboolean jRecursive = recursive;
  1021. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1022. "delete", "(Lorg/apache/hadoop/fs/Path;Z)Z",
  1023. jPath, jRecursive) != 0) {
  1024. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1025. "FileSystem::delete");
  1026. destroyLocalReference(env, jPath);
  1027. return -1;
  1028. }
  1029. //Delete unnecessary local references
  1030. destroyLocalReference(env, jPath);
  1031. return (jVal.z) ? 0 : -1;
  1032. }
  1033. int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
  1034. {
  1035. // JAVA EQUIVALENT:
  1036. // Path old = new Path(oldPath);
  1037. // Path new = new Path(newPath);
  1038. // fs.rename(old, new);
  1039. //Get the JNIEnv* corresponding to current thread
  1040. JNIEnv* env = getJNIEnv();
  1041. if (env == NULL) {
  1042. errno = EINTERNAL;
  1043. return -1;
  1044. }
  1045. jobject jFS = (jobject)fs;
  1046. //Create objects of org.apache.hadoop.fs.Path
  1047. jobject jOldPath = NULL;
  1048. jobject jNewPath = NULL;
  1049. jOldPath = constructNewObjectOfPath(env, oldPath);
  1050. if (jOldPath == NULL) {
  1051. return -1;
  1052. }
  1053. jNewPath = constructNewObjectOfPath(env, newPath);
  1054. if (jNewPath == NULL) {
  1055. destroyLocalReference(env, jOldPath);
  1056. return -1;
  1057. }
  1058. //Rename the file
  1059. jvalue jVal;
  1060. jthrowable jExc = NULL;
  1061. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, "rename",
  1062. JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
  1063. jOldPath, jNewPath) != 0) {
  1064. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1065. "FileSystem::rename");
  1066. destroyLocalReference(env, jOldPath);
  1067. destroyLocalReference(env, jNewPath);
  1068. return -1;
  1069. }
  1070. //Delete unnecessary local references
  1071. destroyLocalReference(env, jOldPath);
  1072. destroyLocalReference(env, jNewPath);
  1073. return (jVal.z) ? 0 : -1;
  1074. }
  1075. char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
  1076. {
  1077. // JAVA EQUIVALENT:
  1078. // Path p = fs.getWorkingDirectory();
  1079. // return p.toString()
  1080. //Get the JNIEnv* corresponding to current thread
  1081. JNIEnv* env = getJNIEnv();
  1082. if (env == NULL) {
  1083. errno = EINTERNAL;
  1084. return NULL;
  1085. }
  1086. jobject jFS = (jobject)fs;
  1087. jobject jPath = NULL;
  1088. jvalue jVal;
  1089. jthrowable jExc = NULL;
  1090. //FileSystem::getWorkingDirectory()
  1091. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
  1092. HADOOP_FS, "getWorkingDirectory",
  1093. "()Lorg/apache/hadoop/fs/Path;") != 0 ||
  1094. jVal.l == NULL) {
  1095. errno = errnoFromException(jExc, env, "FileSystem::"
  1096. "getWorkingDirectory");
  1097. return NULL;
  1098. }
  1099. jPath = jVal.l;
  1100. //Path::toString()
  1101. jstring jPathString;
  1102. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath,
  1103. "org/apache/hadoop/fs/Path", "toString",
  1104. "()Ljava/lang/String;") != 0) {
  1105. errno = errnoFromException(jExc, env, "Path::toString");
  1106. destroyLocalReference(env, jPath);
  1107. return NULL;
  1108. }
  1109. jPathString = jVal.l;
  1110. const char *jPathChars = (const char*)
  1111. ((*env)->GetStringUTFChars(env, jPathString, NULL));
  1112. //Copy to user-provided buffer
  1113. strncpy(buffer, jPathChars, bufferSize);
  1114. //Delete unnecessary local references
  1115. (*env)->ReleaseStringUTFChars(env, jPathString, jPathChars);
  1116. destroyLocalReference(env, jPathString);
  1117. destroyLocalReference(env, jPath);
  1118. return buffer;
  1119. }
  1120. int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
  1121. {
  1122. // JAVA EQUIVALENT:
  1123. // fs.setWorkingDirectory(Path(path));
  1124. //Get the JNIEnv* corresponding to current thread
  1125. JNIEnv* env = getJNIEnv();
  1126. if (env == NULL) {
  1127. errno = EINTERNAL;
  1128. return -1;
  1129. }
  1130. jobject jFS = (jobject)fs;
  1131. int retval = 0;
  1132. jthrowable jExc = NULL;
  1133. //Create an object of org.apache.hadoop.fs.Path
  1134. jobject jPath = constructNewObjectOfPath(env, path);
  1135. if (jPath == NULL) {
  1136. return -1;
  1137. }
  1138. //FileSystem::setWorkingDirectory()
  1139. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1140. "setWorkingDirectory",
  1141. "(Lorg/apache/hadoop/fs/Path;)V", jPath) != 0) {
  1142. errno = errnoFromException(jExc, env, "FileSystem::"
  1143. "setWorkingDirectory");
  1144. retval = -1;
  1145. }
  1146. //Delete unnecessary local references
  1147. destroyLocalReference(env, jPath);
  1148. return retval;
  1149. }
  1150. int hdfsCreateDirectory(hdfsFS fs, const char* path)
  1151. {
  1152. // JAVA EQUIVALENT:
  1153. // fs.mkdirs(new Path(path));
  1154. //Get the JNIEnv* corresponding to current thread
  1155. JNIEnv* env = getJNIEnv();
  1156. if (env == NULL) {
  1157. errno = EINTERNAL;
  1158. return -1;
  1159. }
  1160. jobject jFS = (jobject)fs;
  1161. //Create an object of org.apache.hadoop.fs.Path
  1162. jobject jPath = constructNewObjectOfPath(env, path);
  1163. if (jPath == NULL) {
  1164. return -1;
  1165. }
  1166. //Create the directory
  1167. jvalue jVal;
  1168. jVal.z = 0;
  1169. jthrowable jExc = NULL;
  1170. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1171. "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
  1172. jPath) != 0) {
  1173. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1174. "FileSystem::mkdirs");
  1175. goto done;
  1176. }
  1177. done:
  1178. //Delete unnecessary local references
  1179. destroyLocalReference(env, jPath);
  1180. return (jVal.z) ? 0 : -1;
  1181. }
  1182. int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
  1183. {
  1184. // JAVA EQUIVALENT:
  1185. // fs.setReplication(new Path(path), replication);
  1186. //Get the JNIEnv* corresponding to current thread
  1187. JNIEnv* env = getJNIEnv();
  1188. if (env == NULL) {
  1189. errno = EINTERNAL;
  1190. return -1;
  1191. }
  1192. jobject jFS = (jobject)fs;
  1193. //Create an object of org.apache.hadoop.fs.Path
  1194. jobject jPath = constructNewObjectOfPath(env, path);
  1195. if (jPath == NULL) {
  1196. return -1;
  1197. }
  1198. //Create the directory
  1199. jvalue jVal;
  1200. jthrowable jExc = NULL;
  1201. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1202. "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
  1203. jPath, replication) != 0) {
  1204. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1205. "FileSystem::setReplication");
  1206. goto done;
  1207. }
  1208. done:
  1209. //Delete unnecessary local references
  1210. destroyLocalReference(env, jPath);
  1211. return (jVal.z) ? 0 : -1;
  1212. }
  1213. int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
  1214. {
  1215. // JAVA EQUIVALENT:
  1216. // fs.setOwner(path, owner, group)
  1217. //Get the JNIEnv* corresponding to current thread
  1218. JNIEnv* env = getJNIEnv();
  1219. if (env == NULL) {
  1220. errno = EINTERNAL;
  1221. return -1;
  1222. }
  1223. if (owner == NULL && group == NULL) {
  1224. fprintf(stderr, "Both owner and group cannot be null in chown");
  1225. errno = EINVAL;
  1226. return -1;
  1227. }
  1228. jobject jFS = (jobject)fs;
  1229. jobject jPath = constructNewObjectOfPath(env, path);
  1230. if (jPath == NULL) {
  1231. return -1;
  1232. }
  1233. jstring jOwnerString = (*env)->NewStringUTF(env, owner);
  1234. jstring jGroupString = (*env)->NewStringUTF(env, group);
  1235. //Create the directory
  1236. int ret = 0;
  1237. jthrowable jExc = NULL;
  1238. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1239. "setOwner", JMETHOD3(JPARAM(HADOOP_PATH), JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
  1240. jPath, jOwnerString, jGroupString) != 0) {
  1241. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1242. "FileSystem::setOwner");
  1243. ret = -1;
  1244. goto done;
  1245. }
  1246. done:
  1247. destroyLocalReference(env, jPath);
  1248. destroyLocalReference(env, jOwnerString);
  1249. destroyLocalReference(env, jGroupString);
  1250. return ret;
  1251. }
  1252. int hdfsChmod(hdfsFS fs, const char* path, short mode)
  1253. {
  1254. // JAVA EQUIVALENT:
  1255. // fs.setPermission(path, FsPermission)
  1256. //Get the JNIEnv* corresponding to current thread
  1257. JNIEnv* env = getJNIEnv();
  1258. if (env == NULL) {
  1259. errno = EINTERNAL;
  1260. return -1;
  1261. }
  1262. jobject jFS = (jobject)fs;
  1263. // construct jPerm = FsPermission.createImmutable(short mode);
  1264. jshort jmode = mode;
  1265. jobject jPermObj =
  1266. constructNewObjectOfClass(env, NULL, HADOOP_FSPERM,"(S)V",jmode);
  1267. if (jPermObj == NULL) {
  1268. return -2;
  1269. }
  1270. //Create an object of org.apache.hadoop.fs.Path
  1271. jobject jPath = constructNewObjectOfPath(env, path);
  1272. if (jPath == NULL) {
  1273. destroyLocalReference(env, jPermObj);
  1274. return -3;
  1275. }
  1276. //Create the directory
  1277. int ret = 0;
  1278. jthrowable jExc = NULL;
  1279. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1280. "setPermission", JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM), JAVA_VOID),
  1281. jPath, jPermObj) != 0) {
  1282. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1283. "FileSystem::setPermission");
  1284. ret = -1;
  1285. goto done;
  1286. }
  1287. done:
  1288. destroyLocalReference(env, jPath);
  1289. destroyLocalReference(env, jPermObj);
  1290. return ret;
  1291. }
  1292. int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
  1293. {
  1294. // JAVA EQUIVALENT:
  1295. // fs.setTimes(src, mtime, atime)
  1296. //Get the JNIEnv* corresponding to current thread
  1297. JNIEnv* env = getJNIEnv();
  1298. if (env == NULL) {
  1299. errno = EINTERNAL;
  1300. return -1;
  1301. }
  1302. jobject jFS = (jobject)fs;
  1303. //Create an object of org.apache.hadoop.fs.Path
  1304. jobject jPath = constructNewObjectOfPath(env, path);
  1305. if (jPath == NULL) {
  1306. fprintf(stderr, "could not construct path object\n");
  1307. return -2;
  1308. }
  1309. jlong jmtime = mtime * (jlong)1000;
  1310. jlong jatime = atime * (jlong)1000;
  1311. int ret = 0;
  1312. jthrowable jExc = NULL;
  1313. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1314. "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
  1315. jPath, jmtime, jatime) != 0) {
  1316. fprintf(stderr, "call to setTime failed\n");
  1317. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1318. "FileSystem::setTimes");
  1319. ret = -1;
  1320. goto done;
  1321. }
  1322. done:
  1323. destroyLocalReference(env, jPath);
  1324. return ret;
  1325. }
/**
 * Return the hostnames storing each block of the byte range
 * [start, start+length) of the file at 'path'.
 *
 * The result is a NULL-terminated array of NULL-terminated arrays of
 * malloc'd hostname strings; the caller frees it with hdfsFreeHosts().
 * Returns NULL on error (errno set).
 */
char***
hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
{
    // JAVA EQUIVALENT:
    //  fs.getFileBlockLoctions(new Path(path), start, length);

    //Get the JNIEnv* corresponding to current thread
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
      errno = EINTERNAL;
      return NULL;
    }

    jobject jFS = (jobject)fs;

    //Create an object of org.apache.hadoop.fs.Path
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
      return NULL;
    }

    // First fetch the FileStatus, which getFileBlockLocations requires.
    jvalue jFSVal;
    jthrowable jFSExc = NULL;
    if (invokeMethod(env, &jFSVal, &jFSExc, INSTANCE, jFS,
                     HADOOP_FS, "getFileStatus",
                     "(Lorg/apache/hadoop/fs/Path;)"
                     "Lorg/apache/hadoop/fs/FileStatus;",
                     jPath) != 0) {
      errno = errnoFromException(jFSExc, env, "org.apache.hadoop.fs."
                                 "FileSystem::getFileStatus");
      destroyLocalReference(env, jPath);
      return NULL;
    }
    jobject jFileStatus = jFSVal.l;

    //org.apache.hadoop.fs.FileSystem::getFileBlockLocations
    char*** blockHosts = NULL;
    jobjectArray jBlockLocations;;
    jvalue jVal;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
                     HADOOP_FS, "getFileBlockLocations",
                     "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
                     "[Lorg/apache/hadoop/fs/BlockLocation;",
                     jFileStatus, start, length) != 0) {
      errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                 "FileSystem::getFileBlockLocations");
      destroyLocalReference(env, jPath);
      destroyLocalReference(env, jFileStatus);
      return NULL;
    }
    jBlockLocations = jVal.l;

    //Figure out no of entries in jBlockLocations
    //Allocate memory and add NULL at the end
    jsize jNumFileBlocks = (*env)->GetArrayLength(env, jBlockLocations);

    // Outer array: one slot per block plus a NULL sentinel.
    blockHosts = malloc(sizeof(char**) * (jNumFileBlocks+1));
    if (blockHosts == NULL) {
      errno = ENOMEM;
      goto done;
    }
    blockHosts[jNumFileBlocks] = NULL;
    if (jNumFileBlocks == 0) {
      errno = 0;
      goto done;
    }

    //Now parse each block to get hostnames
    int i = 0;
    for (i=0; i < jNumFileBlocks; ++i) {
      jobject jFileBlock =
        (*env)->GetObjectArrayElement(env, jBlockLocations, i);
      // NOTE(review): jFileBlock is never released with
      // destroyLocalReference — leaks one local ref per block.

      jvalue jVal;
      jobjectArray jFileBlockHosts;
      if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
                       "getHosts", "()[Ljava/lang/String;") ||
              jVal.l == NULL) {
        errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
                                   "BlockLocation::getHosts");
        destroyLocalReference(env, jPath);
        destroyLocalReference(env, jFileStatus);
        destroyLocalReference(env, jBlockLocations);
        // NOTE(review): this early return leaks blockHosts and all rows
        // allocated in earlier iterations.
        return NULL;
      }

      jFileBlockHosts = jVal.l;
      //Figure out no of hosts in jFileBlockHosts
      //Allocate memory and add NULL at the end
      jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);

      // Inner array: one slot per host plus a NULL sentinel.
      blockHosts[i] = malloc(sizeof(char*) * (jNumBlockHosts+1));
      if (blockHosts[i] == NULL) {
        int x = 0;
        for (x=0; x < i; ++x) {
          // NOTE(review): frees the row pointers but not the strdup'd
          // hostname strings inside them.
          free(blockHosts[x]);
        }
        free(blockHosts);
        errno = ENOMEM;
        goto done;
      }
      blockHosts[i][jNumBlockHosts] = NULL;

      //Now parse each hostname
      int j = 0;
      const char *hostName;
      for (j=0; j < jNumBlockHosts; ++j) {
        jstring jHost =
          (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);

        // Copy the hostname out of the JVM before releasing the UTF chars.
        hostName =
          (const char*)((*env)->GetStringUTFChars(env, jHost, NULL));
        blockHosts[i][j] = strdup(hostName);

        (*env)->ReleaseStringUTFChars(env, jHost, hostName);
        destroyLocalReference(env, jHost);
      }

      destroyLocalReference(env, jFileBlockHosts);
    }

done:

    //Delete unnecessary local references
    destroyLocalReference(env, jPath);
    destroyLocalReference(env, jFileStatus);
    destroyLocalReference(env, jBlockLocations);

    return blockHosts;
}
  1439. void hdfsFreeHosts(char ***blockHosts)
  1440. {
  1441. int i, j;
  1442. for (i=0; blockHosts[i]; i++) {
  1443. for (j=0; blockHosts[i][j]; j++) {
  1444. free(blockHosts[i][j]);
  1445. }
  1446. free(blockHosts[i]);
  1447. }
  1448. free(blockHosts);
  1449. }
  1450. tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
  1451. {
  1452. // JAVA EQUIVALENT:
  1453. // fs.getDefaultBlockSize();
  1454. //Get the JNIEnv* corresponding to current thread
  1455. JNIEnv* env = getJNIEnv();
  1456. if (env == NULL) {
  1457. errno = EINTERNAL;
  1458. return -1;
  1459. }
  1460. jobject jFS = (jobject)fs;
  1461. //FileSystem::getDefaultBlockSize()
  1462. tOffset blockSize = -1;
  1463. jvalue jVal;
  1464. jthrowable jExc = NULL;
  1465. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1466. "getDefaultBlockSize", "()J") != 0) {
  1467. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1468. "FileSystem::getDefaultBlockSize");
  1469. return -1;
  1470. }
  1471. blockSize = jVal.j;
  1472. return blockSize;
  1473. }
  1474. tOffset hdfsGetCapacity(hdfsFS fs)
  1475. {
  1476. // JAVA EQUIVALENT:
  1477. // FsStatus fss = fs.getStatus();
  1478. // return Fss.getCapacity();
  1479. //Get the JNIEnv* corresponding to current thread
  1480. JNIEnv* env = getJNIEnv();
  1481. if (env == NULL) {
  1482. errno = EINTERNAL;
  1483. return -1;
  1484. }
  1485. jobject jFS = (jobject)fs;
  1486. //FileSystem::getStatus
  1487. jvalue jVal;
  1488. jthrowable jExc = NULL;
  1489. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1490. "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;") != 0) {
  1491. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1492. "FileSystem::getStatus");
  1493. return -1;
  1494. }
  1495. jobject fss = (jobject)jVal.l;
  1496. if (invokeMethod(env, &jVal, &jExc, INSTANCE, fss, HADOOP_FSSTATUS,
  1497. "getCapacity", "()J") != 0) {
  1498. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1499. "FsStatus::getCapacity");
  1500. destroyLocalReference(env, fss);
  1501. return -1;
  1502. }
  1503. destroyLocalReference(env, fss);
  1504. return jVal.j;
  1505. }
  1506. tOffset hdfsGetUsed(hdfsFS fs)
  1507. {
  1508. // JAVA EQUIVALENT:
  1509. // FsStatus fss = fs.getStatus();
  1510. // return Fss.getUsed();
  1511. //Get the JNIEnv* corresponding to current thread
  1512. JNIEnv* env = getJNIEnv();
  1513. if (env == NULL) {
  1514. errno = EINTERNAL;
  1515. return -1;
  1516. }
  1517. jobject jFS = (jobject)fs;
  1518. //FileSystem::getStatus
  1519. jvalue jVal;
  1520. jthrowable jExc = NULL;
  1521. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1522. "getStatus", "()Lorg/apache/hadoop/fs/FsStatus;") != 0) {
  1523. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1524. "FileSystem::getStatus");
  1525. return -1;
  1526. }
  1527. jobject fss = (jobject)jVal.l;
  1528. if (invokeMethod(env, &jVal, &jExc, INSTANCE, fss, HADOOP_FSSTATUS,
  1529. "getUsed", "()J") != 0) {
  1530. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1531. "FsStatus::getUsed");
  1532. destroyLocalReference(env, fss);
  1533. return -1;
  1534. }
  1535. destroyLocalReference(env, fss);
  1536. return jVal.j;
  1537. }
  1538. static int
  1539. getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
  1540. {
  1541. jvalue jVal;
  1542. jthrowable jExc = NULL;
  1543. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1544. HADOOP_STAT, "isDir", "()Z") != 0) {
  1545. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1546. "FileStatus::isDir");
  1547. return -1;
  1548. }
  1549. fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;
  1550. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1551. HADOOP_STAT, "getReplication", "()S") != 0) {
  1552. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1553. "FileStatus::getReplication");
  1554. return -1;
  1555. }
  1556. fileInfo->mReplication = jVal.s;
  1557. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1558. HADOOP_STAT, "getBlockSize", "()J") != 0) {
  1559. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1560. "FileStatus::getBlockSize");
  1561. return -1;
  1562. }
  1563. fileInfo->mBlockSize = jVal.j;
  1564. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1565. HADOOP_STAT, "getModificationTime", "()J") != 0) {
  1566. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1567. "FileStatus::getModificationTime");
  1568. return -1;
  1569. }
  1570. fileInfo->mLastMod = (tTime) (jVal.j / 1000);
  1571. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1572. HADOOP_STAT, "getAccessTime", "()J") != 0) {
  1573. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1574. "FileStatus::getAccessTime");
  1575. return -1;
  1576. }
  1577. fileInfo->mLastAccess = (tTime) (jVal.j / 1000);
  1578. if (fileInfo->mKind == kObjectKindFile) {
  1579. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
  1580. HADOOP_STAT, "getLen", "()J") != 0) {
  1581. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1582. "FileStatus::getLen");
  1583. return -1;
  1584. }
  1585. fileInfo->mSize = jVal.j;
  1586. }
  1587. jobject jPath;
  1588. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
  1589. "getPath", "()Lorg/apache/hadoop/fs/Path;") ||
  1590. jVal.l == NULL) {
  1591. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1592. "Path::getPath");
  1593. return -1;
  1594. }
  1595. jPath = jVal.l;
  1596. jstring jPathName;
  1597. const char *cPathName;
  1598. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath, HADOOP_PATH,
  1599. "toString", "()Ljava/lang/String;")) {
  1600. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1601. "Path::toString");
  1602. destroyLocalReference(env, jPath);
  1603. return -1;
  1604. }
  1605. jPathName = jVal.l;
  1606. cPathName = (const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL));
  1607. fileInfo->mName = strdup(cPathName);
  1608. (*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
  1609. destroyLocalReference(env, jPath);
  1610. destroyLocalReference(env, jPathName);
  1611. jstring jUserName;
  1612. const char* cUserName;
  1613. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
  1614. "getOwner", "()Ljava/lang/String;")) {
  1615. fprintf(stderr, "Call to org.apache.hadoop.fs."
  1616. "FileStatus::getOwner failed!\n");
  1617. errno = EINTERNAL;
  1618. return -1;
  1619. }
  1620. jUserName = jVal.l;
  1621. cUserName = (const char*) ((*env)->GetStringUTFChars(env, jUserName, NULL));
  1622. fileInfo->mOwner = strdup(cUserName);
  1623. (*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
  1624. destroyLocalReference(env, jUserName);
  1625. jstring jGroupName;
  1626. const char* cGroupName;
  1627. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
  1628. "getGroup", "()Ljava/lang/String;")) {
  1629. fprintf(stderr, "Call to org.apache.hadoop.fs."
  1630. "FileStatus::getGroup failed!\n");
  1631. errno = EINTERNAL;
  1632. return -1;
  1633. }
  1634. jGroupName = jVal.l;
  1635. cGroupName = (const char*) ((*env)->GetStringUTFChars(env, jGroupName, NULL));
  1636. fileInfo->mGroup = strdup(cGroupName);
  1637. (*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
  1638. destroyLocalReference(env, jGroupName);
  1639. jobject jPermission;
  1640. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
  1641. "getPermission", "()Lorg/apache/hadoop/fs/permission/FsPermission;") ||
  1642. jVal.l == NULL) {
  1643. fprintf(stderr, "Call to org.apache.hadoop.fs."
  1644. "FileStatus::getPermission failed!\n");
  1645. errno = EINTERNAL;
  1646. return -1;
  1647. }
  1648. jPermission = jVal.l;
  1649. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPermission, HADOOP_FSPERM,
  1650. "toShort", "()S") != 0) {
  1651. fprintf(stderr, "Call to org.apache.hadoop.fs.permission."
  1652. "FsPermission::toShort failed!\n");
  1653. errno = EINTERNAL;
  1654. return -1;
  1655. }
  1656. fileInfo->mPermissions = jVal.s;
  1657. destroyLocalReference(env, jPermission);
  1658. return 0;
  1659. }
  1660. static int
  1661. getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo *fileInfo)
  1662. {
  1663. // JAVA EQUIVALENT:
  1664. // fs.isDirectory(f)
  1665. // fs.getModificationTime()
  1666. // fs.getAccessTime()
  1667. // fs.getLength(f)
  1668. // f.getPath()
  1669. // f.getOwner()
  1670. // f.getGroup()
  1671. // f.getPermission().toShort()
  1672. jobject jStat;
  1673. jvalue jVal;
  1674. jthrowable jExc = NULL;
  1675. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1676. "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
  1677. jPath) != 0) {
  1678. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1679. "FileSystem::exists");
  1680. return -1;
  1681. }
  1682. if (jVal.z == 0) {
  1683. errno = ENOENT;
  1684. return -1;
  1685. }
  1686. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1687. "getFileStatus", JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)),
  1688. jPath) != 0) {
  1689. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1690. "FileSystem::getFileStatus");
  1691. return -1;
  1692. }
  1693. jStat = jVal.l;
  1694. int ret = getFileInfoFromStat(env, jStat, fileInfo);
  1695. destroyLocalReference(env, jStat);
  1696. return ret;
  1697. }
  1698. hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
  1699. {
  1700. // JAVA EQUIVALENT:
  1701. // Path p(path);
  1702. // Path []pathList = fs.listPaths(p)
  1703. // foreach path in pathList
  1704. // getFileInfo(path)
  1705. //Get the JNIEnv* corresponding to current thread
  1706. JNIEnv* env = getJNIEnv();
  1707. if (env == NULL) {
  1708. errno = EINTERNAL;
  1709. return NULL;
  1710. }
  1711. jobject jFS = (jobject)fs;
  1712. //Create an object of org.apache.hadoop.fs.Path
  1713. jobject jPath = constructNewObjectOfPath(env, path);
  1714. if (jPath == NULL) {
  1715. return NULL;
  1716. }
  1717. hdfsFileInfo *pathList = 0;
  1718. jobjectArray jPathList = NULL;
  1719. jvalue jVal;
  1720. jthrowable jExc = NULL;
  1721. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS, "listStatus",
  1722. JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
  1723. jPath) != 0) {
  1724. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1725. "FileSystem::listStatus");
  1726. destroyLocalReference(env, jPath);
  1727. return NULL;
  1728. }
  1729. jPathList = jVal.l;
  1730. //Figure out no of entries in that directory
  1731. jsize jPathListSize = (*env)->GetArrayLength(env, jPathList);
  1732. *numEntries = jPathListSize;
  1733. if (jPathListSize == 0) {
  1734. errno = 0;
  1735. goto done;
  1736. }
  1737. //Allocate memory
  1738. pathList = calloc(jPathListSize, sizeof(hdfsFileInfo));
  1739. if (pathList == NULL) {
  1740. errno = ENOMEM;
  1741. goto done;
  1742. }
  1743. //Save path information in pathList
  1744. jsize i;
  1745. jobject tmpStat;
  1746. for (i=0; i < jPathListSize; ++i) {
  1747. tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
  1748. if (getFileInfoFromStat(env, tmpStat, &pathList[i])) {
  1749. hdfsFreeFileInfo(pathList, jPathListSize);
  1750. destroyLocalReference(env, tmpStat);
  1751. pathList = NULL;
  1752. goto done;
  1753. }
  1754. destroyLocalReference(env, tmpStat);
  1755. }
  1756. done:
  1757. //Delete unnecessary local references
  1758. destroyLocalReference(env, jPath);
  1759. destroyLocalReference(env, jPathList);
  1760. return pathList;
  1761. }
  1762. hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
  1763. {
  1764. // JAVA EQUIVALENT:
  1765. // File f(path);
  1766. // fs.isDirectory(f)
  1767. // fs.lastModified() ??
  1768. // fs.getLength(f)
  1769. // f.getPath()
  1770. //Get the JNIEnv* corresponding to current thread
  1771. JNIEnv* env = getJNIEnv();
  1772. if (env == NULL) {
  1773. errno = EINTERNAL;
  1774. return NULL;
  1775. }
  1776. jobject jFS = (jobject)fs;
  1777. //Create an object of org.apache.hadoop.fs.Path
  1778. jobject jPath = constructNewObjectOfPath(env, path);
  1779. if (jPath == NULL) {
  1780. return NULL;
  1781. }
  1782. hdfsFileInfo *fileInfo = calloc(1, sizeof(hdfsFileInfo));
  1783. if (getFileInfo(env, jFS, jPath, fileInfo)) {
  1784. hdfsFreeFileInfo(fileInfo, 1);
  1785. fileInfo = NULL;
  1786. goto done;
  1787. }
  1788. done:
  1789. //Delete unnecessary local references
  1790. destroyLocalReference(env, jPath);
  1791. return fileInfo;
  1792. }
  1793. void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
  1794. {
  1795. //Free the mName, mOwner, and mGroup
  1796. int i;
  1797. for (i=0; i < numEntries; ++i) {
  1798. if (hdfsFileInfo[i].mName) {
  1799. free(hdfsFileInfo[i].mName);
  1800. }
  1801. if (hdfsFileInfo[i].mOwner) {
  1802. free(hdfsFileInfo[i].mOwner);
  1803. }
  1804. if (hdfsFileInfo[i].mGroup) {
  1805. free(hdfsFileInfo[i].mGroup);
  1806. }
  1807. }
  1808. //Free entire block
  1809. free(hdfsFileInfo);
  1810. }
  1811. /**
  1812. * vim: ts=4: sw=4: et:
  1813. */