1
0

hdfs.c 57 KB

12345678910111213141516171819202122232425262728293031323334353637383940414243444546474849505152535455565758596061626364656667686970717273747576777879808182838485868788899091929394959697989910010110210310410510610710810911011111211311411511611711811912012112212312412512612712812913013113213313413513613713813914014114214314414514614714814915015115215315415515615715815916016116216316416516616716816917017117217317417517617717817918018118218318418518618718818919019119219319419519619719819920020120220320420520620720820921021121221321421521621721821922022122222322422522622722822923023123223323423523623723823924024124224324424524624724824925025125225325425525625725825926026126226326426526626726826927027127227327427527627727827928028128228328428528628728828929029129229329429529629729829930030130230330430530630730830931031131231331431531631731831932032132232332432532632732832933033133233333433533633733833934034134234334434534634734834935035135235335435535635735835936036136236336436536636736836937037137237337437537637737837938038138238338438538638738838939039139239339439539639739839940040140240340440540640740840941041141241341441541641741841942042142242342442542642742842943043143243343443543643743843944044144244344444544644744844945045145245345445545645745845946046146246346446546646746846947047147247347447547647747847948048148248348448548648748848949049149249349449549649749849950050150250350450550650750850951051151251351451551651751851952052152252352452552652752852953053153253353453553653753853954054154254354454554654754854955055155255355455555655755855956056156256356456556656756856957057157257357457557657757857958058158258358458558658758858959059159259359459559659759859960060160260360460560660760860961061161261361461561661761861962062162262362462562662762862963063163263363463563663763863964064164264364464564664764864965065165265365465565665765865966066166266366466566666766866967067167267367467567667767867968068168268368468568668768868969069169269369469569669769869970070170270
37047057067077087097107117127137147157167177187197207217227237247257267277287297307317327337347357367377387397407417427437447457467477487497507517527537547557567577587597607617627637647657667677687697707717727737747757767777787797807817827837847857867877887897907917927937947957967977987998008018028038048058068078088098108118128138148158168178188198208218228238248258268278288298308318328338348358368378388398408418428438448458468478488498508518528538548558568578588598608618628638648658668678688698708718728738748758768778788798808818828838848858868878888898908918928938948958968978988999009019029039049059069079089099109119129139149159169179189199209219229239249259269279289299309319329339349359369379389399409419429439449459469479489499509519529539549559569579589599609619629639649659669679689699709719729739749759769779789799809819829839849859869879889899909919929939949959969979989991000100110021003100410051006100710081009101010111012101310141015101610171018101910201021102210231024102510261027102810291030103110321033103410351036103710381039104010411042104310441045104610471048104910501051105210531054105510561057105810591060106110621063106410651066106710681069107010711072107310741075107610771078107910801081108210831084108510861087108810891090109110921093109410951096109710981099110011011102110311041105110611071108110911101111111211131114111511161117111811191120112111221123112411251126112711281129113011311132113311341135113611371138113911401141114211431144114511461147114811491150115111521153115411551156115711581159116011611162116311641165116611671168116911701171117211731174117511761177117811791180118111821183118411851186118711881189119011911192119311941195119611971198119912001201120212031204120512061207120812091210121112121213121412151216121712181219122012211222122312241225122612271228122912301231123212331234123512361237123812391240124112421243124412451246124712481249125012511252125312541255125612571258125912601261126212631264126512661267126812691270127112721273127412751276127
71278127912801281128212831284128512861287128812891290129112921293129412951296129712981299130013011302130313041305130613071308130913101311131213131314131513161317131813191320132113221323132413251326132713281329133013311332133313341335133613371338133913401341134213431344134513461347134813491350135113521353135413551356135713581359136013611362136313641365136613671368136913701371137213731374137513761377137813791380138113821383138413851386138713881389139013911392139313941395139613971398139914001401140214031404140514061407140814091410141114121413141414151416141714181419142014211422142314241425142614271428142914301431143214331434143514361437143814391440144114421443144414451446144714481449145014511452145314541455145614571458145914601461146214631464146514661467146814691470147114721473147414751476147714781479148014811482148314841485148614871488148914901491149214931494149514961497149814991500150115021503150415051506150715081509151015111512151315141515151615171518151915201521152215231524152515261527152815291530153115321533153415351536153715381539154015411542154315441545154615471548154915501551155215531554155515561557155815591560156115621563156415651566156715681569157015711572157315741575157615771578157915801581158215831584158515861587158815891590159115921593159415951596159715981599160016011602160316041605160616071608160916101611161216131614161516161617161816191620162116221623162416251626162716281629163016311632163316341635163616371638163916401641164216431644164516461647164816491650165116521653165416551656165716581659166016611662166316641665166616671668166916701671167216731674167516761677167816791680168116821683168416851686168716881689169016911692169316941695169616971698169917001701170217031704170517061707170817091710171117121713171417151716171717181719172017211722172317241725172617271728172917301731173217331734173517361737173817391740174117421743174417451746174717481749175017511752175317541755175617571758175917601761176217631764176517661767176817691770177117721773177417751776177
71778177917801781178217831784178517861787178817891790179117921793179417951796179717981799180018011802180318041805180618071808180918101811181218131814181518161817181818191820182118221823182418251826182718281829183018311832183318341835183618371838183918401841184218431844184518461847184818491850185118521853185418551856185718581859186018611862186318641865186618671868186918701871187218731874187518761877187818791880188118821883188418851886188718881889189018911892189318941895189618971898189919001901190219031904190519061907190819091910191119121913191419151916191719181919192019211922192319241925192619271928192919301931193219331934193519361937193819391940194119421943194419451946194719481949195019511952195319541955195619571958195919601961196219631964196519661967196819691970197119721973197419751976197719781979198019811982198319841985198619871988198919901991199219931994199519961997199819992000200120022003200420052006200720082009201020112012201320142015201620172018201920202021202220232024202520262027202820292030203120322033203420352036203720382039204020412042
  1. /**
  2. * Licensed to the Apache Software Foundation (ASF) under one
  3. * or more contributor license agreements. See the NOTICE file
  4. * distributed with this work for additional information
  5. * regarding copyright ownership. The ASF licenses this file
  6. * to you under the Apache License, Version 2.0 (the
  7. * "License"); you may not use this file except in compliance
  8. * with the License. You may obtain a copy of the License at
  9. *
  10. * http://www.apache.org/licenses/LICENSE-2.0
  11. *
  12. * Unless required by applicable law or agreed to in writing, software
  13. * distributed under the License is distributed on an "AS IS" BASIS,
  14. * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
  15. * See the License for the specific language governing permissions and
  16. * limitations under the License.
  17. */
  18. #include "hdfs.h"
  19. #include "hdfsJniHelper.h"
/* Some frequently used Java paths, as JNI-internal class names
 * ('/'-separated, suitable for FindClass and method signatures). */
#define HADOOP_CONF "org/apache/hadoop/conf/Configuration"
#define HADOOP_PATH "org/apache/hadoop/fs/Path"
#define HADOOP_LOCALFS "org/apache/hadoop/fs/LocalFileSystem"
#define HADOOP_FS "org/apache/hadoop/fs/FileSystem"
#define HADOOP_BLK_LOC "org/apache/hadoop/fs/BlockLocation"
#define HADOOP_DFS "org/apache/hadoop/hdfs/DistributedFileSystem"
#define HADOOP_ISTRM "org/apache/hadoop/fs/FSDataInputStream"
#define HADOOP_OSTRM "org/apache/hadoop/fs/FSDataOutputStream"
#define HADOOP_STAT "org/apache/hadoop/fs/FileStatus"
#define HADOOP_FSPERM "org/apache/hadoop/fs/permission/FsPermission"
#define JAVA_NET_ISA "java/net/InetSocketAddress"
#define JAVA_NET_URI "java/net/URI"
#define JAVA_STRING "java/lang/String"
#define JAVA_VOID "V"

/* Macros for constructing JNI method signatures by string concatenation. */
#define JPARAM(X) "L" X ";"          /* object parameter:  "Lpkg/Cls;"   */
#define JARRPARAM(X) "[L" X ";"      /* object-array parameter          */
#define JMETHOD1(X, R) "(" X ")" R           /* one-arg signature       */
#define JMETHOD2(X, Y, R) "(" X Y ")" R      /* two-arg signature       */
#define JMETHOD3(X, Y, Z, R) "(" X Y Z")" R  /* three-arg signature     */
  41. /**
  42. * hdfsJniEnv: A wrapper struct to be used as 'value'
  43. * while saving thread -> JNIEnv* mappings
  44. */
typedef struct
{
    JNIEnv* env;  /* the JNIEnv* attached to the owning thread */
} hdfsJniEnv;
  49. /**
  50. * Helper function to destroy a local reference of java.lang.Object
  51. * @param env: The JNIEnv pointer.
  52. * @param jFile: The local reference of java.lang.Object object
  53. * @return None.
  54. */
  55. static void destroyLocalReference(JNIEnv *env, jobject jObject)
  56. {
  57. if (jObject)
  58. (*env)->DeleteLocalRef(env, jObject);
  59. }
  60. /**
  61. * Helper function to create a org.apache.hadoop.fs.Path object.
  62. * @param env: The JNIEnv pointer.
  63. * @param path: The file-path for which to construct org.apache.hadoop.fs.Path
  64. * object.
  65. * @return Returns a jobject on success and NULL on error.
  66. */
  67. static jobject constructNewObjectOfPath(JNIEnv *env, const char *path)
  68. {
  69. //Construct a java.lang.String object
  70. jstring jPathString = (*env)->NewStringUTF(env, path);
  71. //Construct the org.apache.hadoop.fs.Path object
  72. jobject jPath =
  73. constructNewObjectOfClass(env, NULL, "org/apache/hadoop/fs/Path",
  74. "(Ljava/lang/String;)V", jPathString);
  75. if (jPath == NULL) {
  76. fprintf(stderr, "Can't construct instance of class "
  77. "org.apache.hadoop.fs.Path for %s\n", path);
  78. errno = EINTERNAL;
  79. return NULL;
  80. }
  81. // Destroy the local reference to the java.lang.String object
  82. destroyLocalReference(env, jPathString);
  83. return jPath;
  84. }
  85. /**
  86. * Helper function to translate an exception into a meaningful errno value.
  87. * @param exc: The exception.
  88. * @param env: The JNIEnv Pointer.
  89. * @param method: The name of the method that threw the exception. This
  90. * may be format string to be used in conjuction with additional arguments.
  91. * @return Returns a meaningful errno value if possible, or EINTERNAL if not.
  92. */
  93. static int errnoFromException(jthrowable exc, JNIEnv *env,
  94. const char *method, ...)
  95. {
  96. va_list ap;
  97. int errnum = 0;
  98. char *excClass = NULL;
  99. if (exc == NULL)
  100. goto default_error;
  101. if ((excClass = classNameOfObject((jobject) exc, env)) == NULL) {
  102. errnum = EINTERNAL;
  103. goto done;
  104. }
  105. if (!strcmp(excClass, "org.apache.hadoop.security."
  106. "AccessControlException")) {
  107. errnum = EACCES;
  108. goto done;
  109. }
  110. if (!strcmp(excClass, "org.apache.hadoop.hdfs.protocol."
  111. "QuotaExceededException")) {
  112. errnum = EDQUOT;
  113. goto done;
  114. }
  115. if (!strcmp(excClass, "java.io.FileNotFoundException")) {
  116. errnum = ENOENT;
  117. goto done;
  118. }
  119. //TODO: interpret more exceptions; maybe examine exc.getMessage()
  120. default_error:
  121. //Can't tell what went wrong, so just punt
  122. (*env)->ExceptionDescribe(env);
  123. fprintf(stderr, "Call to ");
  124. va_start(ap, method);
  125. vfprintf(stderr, method, ap);
  126. va_end(ap);
  127. fprintf(stderr, " failed!\n");
  128. errnum = EINTERNAL;
  129. done:
  130. (*env)->ExceptionClear(env);
  131. if (excClass != NULL)
  132. free(excClass);
  133. return errnum;
  134. }
  135. hdfsFS hdfsConnect(const char* host, tPort port) {
  136. // conect with NULL as user name
  137. return hdfsConnectAsUser(host, port, NULL);
  138. }
  139. hdfsFS hdfsConnectAsUser(const char* host, tPort port, const char *user)
  140. {
  141. // JAVA EQUIVALENT:
  142. // FileSystem fs = FileSystem.get(new Configuration());
  143. // return fs;
  144. JNIEnv *env = 0;
  145. jobject jConfiguration = NULL;
  146. jobject jFS = NULL;
  147. jobject jURI = NULL;
  148. jstring jURIString = NULL;
  149. jvalue jVal;
  150. jthrowable jExc = NULL;
  151. char *cURI = 0;
  152. jobject gFsRef = NULL;
  153. jstring jUserString = NULL;
  154. //Get the JNIEnv* corresponding to current thread
  155. env = getJNIEnv();
  156. if (env == NULL) {
  157. errno = EINTERNAL;
  158. return NULL;
  159. }
  160. //Create the org.apache.hadoop.conf.Configuration object
  161. jConfiguration =
  162. constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
  163. if (jConfiguration == NULL) {
  164. fprintf(stderr, "Can't construct instance of class "
  165. "org.apache.hadoop.conf.Configuration\n");
  166. errno = EINTERNAL;
  167. return NULL;
  168. }
  169. if (user != NULL) {
  170. jUserString = (*env)->NewStringUTF(env, user);
  171. }
  172. //Check what type of FileSystem the caller wants...
  173. if (host == NULL) {
  174. // fs = FileSytem::getLocal(conf);
  175. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "getLocal",
  176. JMETHOD1(JPARAM(HADOOP_CONF),
  177. JPARAM(HADOOP_LOCALFS)),
  178. jConfiguration) != 0) {
  179. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  180. "FileSystem::getLocal");
  181. goto done;
  182. }
  183. jFS = jVal.l;
  184. }
  185. //FileSystem.get(conf) -> FileSystem.get(FileSystem.getDefaultUri(conf),
  186. // conf, user)
  187. else if (!strcmp(host, "default") && port == 0) {
  188. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS,
  189. "getDefaultUri",
  190. "(Lorg/apache/hadoop/conf/Configuration;)Ljava/net/URI;",
  191. jConfiguration) != 0) {
  192. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs.",
  193. "FileSystem::getDefaultUri");
  194. goto done;
  195. }
  196. jURI = jVal.l;
  197. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "get",
  198. JMETHOD3(JPARAM(JAVA_NET_URI),
  199. JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
  200. JPARAM(HADOOP_FS)),
  201. jURI, jConfiguration, jUserString) != 0) {
  202. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  203. "Filesystem::get(URI, Configuration)");
  204. goto done;
  205. }
  206. jFS = jVal.l;
  207. }
  208. else {
  209. // fs = FileSystem::get(URI, conf, ugi);
  210. cURI = malloc(strlen(host)+16);
  211. sprintf(cURI, "hdfs://%s:%d", host, (int)(port));
  212. if (cURI == NULL) {
  213. fprintf (stderr, "Couldn't allocate an object of size %d",
  214. strlen(host) + 16);
  215. errno = EINTERNAL;
  216. goto done;
  217. }
  218. jURIString = (*env)->NewStringUTF(env, cURI);
  219. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, JAVA_NET_URI,
  220. "create", "(Ljava/lang/String;)Ljava/net/URI;",
  221. jURIString) != 0) {
  222. errno = errnoFromException(jExc, env, "java.net.URI::create");
  223. goto done;
  224. }
  225. jURI = jVal.l;
  226. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL, HADOOP_FS, "get",
  227. JMETHOD3(JPARAM(JAVA_NET_URI),
  228. JPARAM(HADOOP_CONF), JPARAM(JAVA_STRING),
  229. JPARAM(HADOOP_FS)),
  230. jURI, jConfiguration, jUserString) != 0) {
  231. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  232. "Filesystem::get(URI, Configuration)");
  233. goto done;
  234. }
  235. jFS = jVal.l;
  236. }
  237. done:
  238. // Release unnecessary local references
  239. destroyLocalReference(env, jConfiguration);
  240. destroyLocalReference(env, jURIString);
  241. destroyLocalReference(env, jURI);
  242. destroyLocalReference(env, jUserString);
  243. if (cURI) free(cURI);
  244. /* Create a global reference for this fs */
  245. if (jFS) {
  246. gFsRef = (*env)->NewGlobalRef(env, jFS);
  247. destroyLocalReference(env, jFS);
  248. }
  249. return gFsRef;
  250. }
  251. int hdfsDisconnect(hdfsFS fs)
  252. {
  253. // JAVA EQUIVALENT:
  254. // fs.close()
  255. //Get the JNIEnv* corresponding to current thread
  256. JNIEnv* env = getJNIEnv();
  257. if (env == NULL) {
  258. errno = EINTERNAL;
  259. return -2;
  260. }
  261. //Parameters
  262. jobject jFS = (jobject)fs;
  263. //Caught exception
  264. jthrowable jExc = NULL;
  265. //Sanity check
  266. if (fs == NULL) {
  267. errno = EBADF;
  268. return -1;
  269. }
  270. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  271. "close", "()V") != 0) {
  272. errno = errnoFromException(jExc, env, "Filesystem::close");
  273. return -1;
  274. }
  275. //Release unnecessary references
  276. (*env)->DeleteGlobalRef(env, fs);
  277. return 0;
  278. }
hdfsFile hdfsOpenFile(hdfsFS fs, const char* path, int flags,
                      int bufferSize, short replication, tSize blockSize)
{
    /*
      JAVA EQUIVALENT:
       File f = new File(path);
       FSData{Input|Output}Stream f{is|os} = fs.create(f);
       return f{is|os};
    */
    /* Get the JNIEnv* corresponding to current thread */
    JNIEnv* env = getJNIEnv();
    if (env == NULL) {
      errno = EINTERNAL;
      return NULL;
    }

    jobject jFS = (jobject)fs;

    /* Read-write handles are not supported by the underlying Java API. */
    if (flags & O_RDWR) {
      fprintf(stderr, "ERROR: cannot open an hdfs file in O_RDWR mode\n");
      errno = ENOTSUP;
      return NULL;
    }

    if ((flags & O_CREAT) && (flags & O_EXCL)) {
      fprintf(stderr, "WARN: hdfs does not truly support O_CREATE && O_EXCL\n");
    }

    /* Select the hadoop java api/signature:
       - no O_WRONLY           -> FileSystem.open(Path, int bufferSize)
       - O_WRONLY | O_APPEND   -> FileSystem.append(Path)
       - O_WRONLY (create)     -> FileSystem.create(Path, boolean overwrite,
                                      int bufferSize, short repl, long blk) */
    const char* method = ((flags & O_WRONLY) == 0) ? "open" : (flags & O_APPEND) ? "append" : "create";
    const char* signature = ((flags & O_WRONLY) == 0) ?
        JMETHOD2(JPARAM(HADOOP_PATH), "I", JPARAM(HADOOP_ISTRM)) :
        (flags & O_APPEND) ?
        JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_OSTRM)) :
        JMETHOD2(JPARAM(HADOOP_PATH), "ZISJ", JPARAM(HADOOP_OSTRM));

    /* Return value */
    hdfsFile file = NULL;

    /* Create an object of org.apache.hadoop.fs.Path */
    jobject jPath = constructNewObjectOfPath(env, path);
    if (jPath == NULL) {
        return NULL;
    }

    /* Get the Configuration object from the FileSystem object */
    jvalue jVal;
    jobject jConfiguration = NULL;
    jthrowable jExc = NULL;
    if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                     "getConf", JMETHOD1("", JPARAM(HADOOP_CONF))) != 0) {
        errno = errnoFromException(jExc, env, "get configuration object "
                                   "from filesystem");
        destroyLocalReference(env, jPath);
        return NULL;
    }
    jConfiguration = jVal.l;

    jint  jBufferSize = bufferSize;
    jshort jReplication = replication;
    jlong jBlockSize = blockSize;
    /* Configuration keys consulted when the caller passed 0 for a knob.
       NOTE(review): NewStringUTF results are not checked for NULL here —
       a failure would pass NULL keys to getInt/getLong; confirm acceptable. */
    jstring jStrBufferSize = (*env)->NewStringUTF(env, "io.file.buffer.size");
    jstring jStrReplication = (*env)->NewStringUTF(env, "dfs.replication");
    jstring jStrBlockSize = (*env)->NewStringUTF(env, "dfs.block.size");

    //bufferSize: 0 means "use the configured io.file.buffer.size, default 4096"
    if (!bufferSize) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                         HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
                         jStrBufferSize, 4096) != 0) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "Configuration::getInt");
            goto done;
        }
        jBufferSize = jVal.i;
    }

    /* replication and block size only matter for create (not open/append) */
    if ((flags & O_WRONLY) && (flags & O_APPEND) == 0) {
        //replication: 0 means "use configured dfs.replication, default 1"
        if (!replication) {
            if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                             HADOOP_CONF, "getInt", "(Ljava/lang/String;I)I",
                             jStrReplication, 1) != 0) {
                errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                           "Configuration::getInt");
                goto done;
            }
            jReplication = jVal.i;
        }

        //blockSize: 0 means "use configured dfs.block.size, default 64MB"
        if (!blockSize) {
            if (invokeMethod(env, &jVal, &jExc, INSTANCE, jConfiguration,
                             HADOOP_CONF, "getLong", "(Ljava/lang/String;J)J",
                             jStrBlockSize, 67108864)) {
                errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                           "FileSystem::%s(%s)", method,
                                           signature);
                goto done;
            }
            jBlockSize = jVal.j;
        }
    }

    /* Create and return either the FSDataInputStream or
       FSDataOutputStream references jobject jStream */

    // READ?
    if ((flags & O_WRONLY) == 0) {
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath, jBufferSize)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    }  else if ((flags & O_WRONLY) && (flags & O_APPEND)) {
        // WRITE/APPEND?
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    } else {
        // WRITE/CREATE — always overwrites an existing file (jOverWrite = 1)
        jboolean jOverWrite = 1;
        if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
                         method, signature, jPath, jOverWrite,
                         jBufferSize, jReplication, jBlockSize)) {
            errno = errnoFromException(jExc, env, "org.apache.hadoop.conf."
                                       "FileSystem::%s(%s)", method,
                                       signature);
            goto done;
        }
    }

    /* Wrap the stream in a global reference so it survives this JNI frame.
       NOTE(review): if malloc fails here, the local ref in jVal.l is never
       released and the opened Java stream is leaked — consider closing it. */
    file = malloc(sizeof(struct hdfsFile_internal));
    if (!file) {
        errno = ENOMEM;
    } else {
        file->file = (*env)->NewGlobalRef(env, jVal.l);
        file->type = (((flags & O_WRONLY) == 0) ? INPUT : OUTPUT);

        destroyLocalReference(env, jVal.l);
    }

    done:
    //Delete unnecessary local references
    destroyLocalReference(env, jStrBufferSize);
    destroyLocalReference(env, jStrReplication);
    destroyLocalReference(env, jStrBlockSize);
    destroyLocalReference(env, jConfiguration);
    destroyLocalReference(env, jPath);

    return file;
}
  420. int hdfsCloseFile(hdfsFS fs, hdfsFile file)
  421. {
  422. // JAVA EQUIVALENT:
  423. // file.close
  424. //Get the JNIEnv* corresponding to current thread
  425. JNIEnv* env = getJNIEnv();
  426. if (env == NULL) {
  427. errno = EINTERNAL;
  428. return -2;
  429. }
  430. //Parameters
  431. jobject jStream = (jobject)(file ? file->file : NULL);
  432. //Caught exception
  433. jthrowable jExc = NULL;
  434. //Sanity check
  435. if (!file || file->type == UNINITIALIZED) {
  436. errno = EBADF;
  437. return -1;
  438. }
  439. //The interface whose 'close' method to be called
  440. const char* interface = (file->type == INPUT) ?
  441. HADOOP_ISTRM : HADOOP_OSTRM;
  442. if (invokeMethod(env, NULL, &jExc, INSTANCE, jStream, interface,
  443. "close", "()V") != 0) {
  444. errno = errnoFromException(jExc, env, "%s::close", interface);
  445. return -1;
  446. }
  447. //De-allocate memory
  448. free(file);
  449. (*env)->DeleteGlobalRef(env, jStream);
  450. return 0;
  451. }
  452. int hdfsExists(hdfsFS fs, const char *path)
  453. {
  454. JNIEnv *env = getJNIEnv();
  455. if (env == NULL) {
  456. errno = EINTERNAL;
  457. return -2;
  458. }
  459. jobject jPath = constructNewObjectOfPath(env, path);
  460. jvalue jVal;
  461. jthrowable jExc = NULL;
  462. jobject jFS = (jobject)fs;
  463. if (jPath == NULL) {
  464. return -1;
  465. }
  466. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  467. "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
  468. jPath) != 0) {
  469. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  470. "FileSystem::exists");
  471. destroyLocalReference(env, jPath);
  472. return -1;
  473. }
  474. destroyLocalReference(env, jPath);
  475. return jVal.z ? 0 : -1;
  476. }
  477. tSize hdfsRead(hdfsFS fs, hdfsFile f, void* buffer, tSize length)
  478. {
  479. // JAVA EQUIVALENT:
  480. // byte [] bR = new byte[length];
  481. // fis.read(bR);
  482. //Get the JNIEnv* corresponding to current thread
  483. JNIEnv* env = getJNIEnv();
  484. if (env == NULL) {
  485. errno = EINTERNAL;
  486. return -1;
  487. }
  488. //Parameters
  489. jobject jInputStream = (jobject)(f ? f->file : NULL);
  490. jbyteArray jbRarray;
  491. jint noReadBytes = 0;
  492. jvalue jVal;
  493. jthrowable jExc = NULL;
  494. //Sanity check
  495. if (!f || f->type == UNINITIALIZED) {
  496. errno = EBADF;
  497. return -1;
  498. }
  499. //Error checking... make sure that this file is 'readable'
  500. if (f->type != INPUT) {
  501. fprintf(stderr, "Cannot read from a non-InputStream object!\n");
  502. errno = EINVAL;
  503. return -1;
  504. }
  505. //Read the requisite bytes
  506. jbRarray = (*env)->NewByteArray(env, length);
  507. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  508. "read", "([B)I", jbRarray) != 0) {
  509. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  510. "FSDataInputStream::read");
  511. noReadBytes = -1;
  512. }
  513. else {
  514. noReadBytes = jVal.i;
  515. if (noReadBytes > 0) {
  516. (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
  517. } else {
  518. //This is a valid case: there aren't any bytes left to read!
  519. if (noReadBytes == 0 || noReadBytes < -1) {
  520. fprintf(stderr, "WARN: FSDataInputStream.read returned invalid return code - libhdfs returning EOF, i.e., 0: %d\n", noReadBytes);
  521. }
  522. noReadBytes = 0;
  523. }
  524. errno = 0;
  525. }
  526. destroyLocalReference(env, jbRarray);
  527. return noReadBytes;
  528. }
  529. tSize hdfsPread(hdfsFS fs, hdfsFile f, tOffset position,
  530. void* buffer, tSize length)
  531. {
  532. // JAVA EQUIVALENT:
  533. // byte [] bR = new byte[length];
  534. // fis.read(pos, bR, 0, length);
  535. //Get the JNIEnv* corresponding to current thread
  536. JNIEnv* env = getJNIEnv();
  537. if (env == NULL) {
  538. errno = EINTERNAL;
  539. return -1;
  540. }
  541. //Parameters
  542. jobject jInputStream = (jobject)(f ? f->file : NULL);
  543. jbyteArray jbRarray;
  544. jint noReadBytes = 0;
  545. jvalue jVal;
  546. jthrowable jExc = NULL;
  547. //Sanity check
  548. if (!f || f->type == UNINITIALIZED) {
  549. errno = EBADF;
  550. return -1;
  551. }
  552. //Error checking... make sure that this file is 'readable'
  553. if (f->type != INPUT) {
  554. fprintf(stderr, "Cannot read from a non-InputStream object!\n");
  555. errno = EINVAL;
  556. return -1;
  557. }
  558. //Read the requisite bytes
  559. jbRarray = (*env)->NewByteArray(env, length);
  560. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  561. "read", "(J[BII)I", position, jbRarray, 0, length) != 0) {
  562. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  563. "FSDataInputStream::read");
  564. noReadBytes = -1;
  565. }
  566. else {
  567. noReadBytes = jVal.i;
  568. if (noReadBytes > 0) {
  569. (*env)->GetByteArrayRegion(env, jbRarray, 0, noReadBytes, buffer);
  570. } else {
  571. //This is a valid case: there aren't any bytes left to read!
  572. if (noReadBytes == 0 || noReadBytes < -1) {
  573. fprintf(stderr, "WARN: FSDataInputStream.read returned invalid return code - libhdfs returning EOF, i.e., 0: %d\n", noReadBytes);
  574. }
  575. noReadBytes = 0;
  576. }
  577. errno = 0;
  578. }
  579. destroyLocalReference(env, jbRarray);
  580. return noReadBytes;
  581. }
  582. tSize hdfsWrite(hdfsFS fs, hdfsFile f, const void* buffer, tSize length)
  583. {
  584. // JAVA EQUIVALENT
  585. // byte b[] = str.getBytes();
  586. // fso.write(b);
  587. //Get the JNIEnv* corresponding to current thread
  588. JNIEnv* env = getJNIEnv();
  589. if (env == NULL) {
  590. errno = EINTERNAL;
  591. return -1;
  592. }
  593. //Parameters
  594. jobject jOutputStream = (jobject)(f ? f->file : 0);
  595. jbyteArray jbWarray;
  596. //Caught exception
  597. jthrowable jExc = NULL;
  598. //Sanity check
  599. if (!f || f->type == UNINITIALIZED) {
  600. errno = EBADF;
  601. return -1;
  602. }
  603. if (length < 0) {
  604. errno = EINVAL;
  605. return -1;
  606. }
  607. //Error checking... make sure that this file is 'writable'
  608. if (f->type != OUTPUT) {
  609. fprintf(stderr, "Cannot write into a non-OutputStream object!\n");
  610. errno = EINVAL;
  611. return -1;
  612. }
  613. // 'length' equals 'zero' is a valid use-case according to Posix!
  614. if (length != 0) {
  615. //Write the requisite bytes into the file
  616. jbWarray = (*env)->NewByteArray(env, length);
  617. (*env)->SetByteArrayRegion(env, jbWarray, 0, length, buffer);
  618. if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
  619. HADOOP_OSTRM, "write",
  620. "([B)V", jbWarray) != 0) {
  621. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  622. "FSDataOutputStream::write");
  623. length = -1;
  624. }
  625. destroyLocalReference(env, jbWarray);
  626. }
  627. //Return no. of bytes succesfully written (libc way)
  628. //i.e. 'length' itself! ;-)
  629. return length;
  630. }
  631. int hdfsSeek(hdfsFS fs, hdfsFile f, tOffset desiredPos)
  632. {
  633. // JAVA EQUIVALENT
  634. // fis.seek(pos);
  635. //Get the JNIEnv* corresponding to current thread
  636. JNIEnv* env = getJNIEnv();
  637. if (env == NULL) {
  638. errno = EINTERNAL;
  639. return -1;
  640. }
  641. //Parameters
  642. jobject jInputStream = (jobject)(f ? f->file : 0);
  643. //Caught exception
  644. jthrowable jExc = NULL;
  645. //Sanity check
  646. if (!f || f->type != INPUT) {
  647. errno = EBADF;
  648. return -1;
  649. }
  650. if (invokeMethod(env, NULL, &jExc, INSTANCE, jInputStream, HADOOP_ISTRM,
  651. "seek", "(J)V", desiredPos) != 0) {
  652. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  653. "FSDataInputStream::seek");
  654. return -1;
  655. }
  656. return 0;
  657. }
  658. tOffset hdfsTell(hdfsFS fs, hdfsFile f)
  659. {
  660. // JAVA EQUIVALENT
  661. // pos = f.getPos();
  662. //Get the JNIEnv* corresponding to current thread
  663. JNIEnv* env = getJNIEnv();
  664. if (env == NULL) {
  665. errno = EINTERNAL;
  666. return -1;
  667. }
  668. //Parameters
  669. jobject jStream = (jobject)(f ? f->file : 0);
  670. //Sanity check
  671. if (!f || f->type == UNINITIALIZED) {
  672. errno = EBADF;
  673. return -1;
  674. }
  675. const char* interface = (f->type == INPUT) ?
  676. HADOOP_ISTRM : HADOOP_OSTRM;
  677. jlong currentPos = -1;
  678. jvalue jVal;
  679. jthrowable jExc = NULL;
  680. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStream,
  681. interface, "getPos", "()J") != 0) {
  682. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  683. "FSDataInputStream::getPos");
  684. return -1;
  685. }
  686. currentPos = jVal.j;
  687. return (tOffset)currentPos;
  688. }
  689. int hdfsFlush(hdfsFS fs, hdfsFile f)
  690. {
  691. // JAVA EQUIVALENT
  692. // fos.flush();
  693. //Get the JNIEnv* corresponding to current thread
  694. JNIEnv* env = getJNIEnv();
  695. if (env == NULL) {
  696. errno = EINTERNAL;
  697. return -1;
  698. }
  699. //Parameters
  700. jobject jOutputStream = (jobject)(f ? f->file : 0);
  701. //Caught exception
  702. jthrowable jExc = NULL;
  703. //Sanity check
  704. if (!f || f->type != OUTPUT) {
  705. errno = EBADF;
  706. return -1;
  707. }
  708. if (invokeMethod(env, NULL, &jExc, INSTANCE, jOutputStream,
  709. HADOOP_OSTRM, "flush", "()V") != 0) {
  710. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  711. "FSDataInputStream::flush");
  712. return -1;
  713. }
  714. return 0;
  715. }
  716. int hdfsAvailable(hdfsFS fs, hdfsFile f)
  717. {
  718. // JAVA EQUIVALENT
  719. // fis.available();
  720. //Get the JNIEnv* corresponding to current thread
  721. JNIEnv* env = getJNIEnv();
  722. if (env == NULL) {
  723. errno = EINTERNAL;
  724. return -1;
  725. }
  726. //Parameters
  727. jobject jInputStream = (jobject)(f ? f->file : 0);
  728. //Caught exception
  729. jthrowable jExc = NULL;
  730. //Sanity check
  731. if (!f || f->type != INPUT) {
  732. errno = EBADF;
  733. return -1;
  734. }
  735. jint available = -1;
  736. jvalue jVal;
  737. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jInputStream,
  738. HADOOP_ISTRM, "available", "()I") != 0) {
  739. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  740. "FSDataInputStream::available");
  741. return -1;
  742. }
  743. available = jVal.i;
  744. return available;
  745. }
  746. int hdfsCopy(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
  747. {
  748. //JAVA EQUIVALENT
  749. // FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
  750. // deleteSource = false, conf)
  751. //Get the JNIEnv* corresponding to current thread
  752. JNIEnv* env = getJNIEnv();
  753. if (env == NULL) {
  754. errno = EINTERNAL;
  755. return -1;
  756. }
  757. //Parameters
  758. jobject jSrcFS = (jobject)srcFS;
  759. jobject jDstFS = (jobject)dstFS;
  760. jobject jSrcPath = NULL;
  761. jobject jDstPath = NULL;
  762. jSrcPath = constructNewObjectOfPath(env, src);
  763. if (jSrcPath == NULL) {
  764. return -1;
  765. }
  766. jDstPath = constructNewObjectOfPath(env, dst);
  767. if (jDstPath == NULL) {
  768. destroyLocalReference(env, jSrcPath);
  769. return -1;
  770. }
  771. int retval = 0;
  772. //Create the org.apache.hadoop.conf.Configuration object
  773. jobject jConfiguration =
  774. constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
  775. if (jConfiguration == NULL) {
  776. fprintf(stderr, "Can't construct instance of class "
  777. "org.apache.hadoop.conf.Configuration\n");
  778. errno = EINTERNAL;
  779. destroyLocalReference(env, jSrcPath);
  780. destroyLocalReference(env, jDstPath);
  781. return -1;
  782. }
  783. //FileUtil::copy
  784. jboolean deleteSource = 0; //Only copy
  785. jvalue jVal;
  786. jthrowable jExc = NULL;
  787. if (invokeMethod(env, &jVal, &jExc, STATIC,
  788. NULL, "org/apache/hadoop/fs/FileUtil", "copy",
  789. "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
  790. jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
  791. jConfiguration) != 0) {
  792. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  793. "FileUtil::copy");
  794. retval = -1;
  795. goto done;
  796. }
  797. done:
  798. //Delete unnecessary local references
  799. destroyLocalReference(env, jConfiguration);
  800. destroyLocalReference(env, jSrcPath);
  801. destroyLocalReference(env, jDstPath);
  802. return retval;
  803. }
  804. int hdfsMove(hdfsFS srcFS, const char* src, hdfsFS dstFS, const char* dst)
  805. {
  806. //JAVA EQUIVALENT
  807. // FileUtil::copy(srcFS, srcPath, dstFS, dstPath,
  808. // deleteSource = true, conf)
  809. //Get the JNIEnv* corresponding to current thread
  810. JNIEnv* env = getJNIEnv();
  811. if (env == NULL) {
  812. errno = EINTERNAL;
  813. return -1;
  814. }
  815. //Parameters
  816. jobject jSrcFS = (jobject)srcFS;
  817. jobject jDstFS = (jobject)dstFS;
  818. jobject jSrcPath = NULL;
  819. jobject jDstPath = NULL;
  820. jSrcPath = constructNewObjectOfPath(env, src);
  821. if (jSrcPath == NULL) {
  822. return -1;
  823. }
  824. jDstPath = constructNewObjectOfPath(env, dst);
  825. if (jDstPath == NULL) {
  826. destroyLocalReference(env, jSrcPath);
  827. return -1;
  828. }
  829. int retval = 0;
  830. //Create the org.apache.hadoop.conf.Configuration object
  831. jobject jConfiguration =
  832. constructNewObjectOfClass(env, NULL, HADOOP_CONF, "()V");
  833. if (jConfiguration == NULL) {
  834. fprintf(stderr, "Can't construct instance of class "
  835. "org.apache.hadoop.conf.Configuration\n");
  836. errno = EINTERNAL;
  837. destroyLocalReference(env, jSrcPath);
  838. destroyLocalReference(env, jDstPath);
  839. return -1;
  840. }
  841. //FileUtil::copy
  842. jboolean deleteSource = 1; //Delete src after copy
  843. jvalue jVal;
  844. jthrowable jExc = NULL;
  845. if (invokeMethod(env, &jVal, &jExc, STATIC, NULL,
  846. "org/apache/hadoop/fs/FileUtil", "copy",
  847. "(Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;Lorg/apache/hadoop/fs/FileSystem;Lorg/apache/hadoop/fs/Path;ZLorg/apache/hadoop/conf/Configuration;)Z",
  848. jSrcFS, jSrcPath, jDstFS, jDstPath, deleteSource,
  849. jConfiguration) != 0) {
  850. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  851. "FileUtil::copy(move)");
  852. retval = -1;
  853. goto done;
  854. }
  855. done:
  856. //Delete unnecessary local references
  857. destroyLocalReference(env, jConfiguration);
  858. destroyLocalReference(env, jSrcPath);
  859. destroyLocalReference(env, jDstPath);
  860. return retval;
  861. }
  862. int hdfsDelete(hdfsFS fs, const char* path)
  863. {
  864. // JAVA EQUIVALENT:
  865. // File f = new File(path);
  866. // bool retval = fs.delete(f);
  867. //Get the JNIEnv* corresponding to current thread
  868. JNIEnv* env = getJNIEnv();
  869. if (env == NULL) {
  870. errno = EINTERNAL;
  871. return -1;
  872. }
  873. jobject jFS = (jobject)fs;
  874. //Create an object of java.io.File
  875. jobject jPath = constructNewObjectOfPath(env, path);
  876. if (jPath == NULL) {
  877. return -1;
  878. }
  879. //Delete the file
  880. jvalue jVal;
  881. jthrowable jExc = NULL;
  882. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  883. "delete", "(Lorg/apache/hadoop/fs/Path;)Z",
  884. jPath) != 0) {
  885. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  886. "FileSystem::delete");
  887. destroyLocalReference(env, jPath);
  888. return -1;
  889. }
  890. //Delete unnecessary local references
  891. destroyLocalReference(env, jPath);
  892. return (jVal.z) ? 0 : -1;
  893. }
  894. int hdfsRename(hdfsFS fs, const char* oldPath, const char* newPath)
  895. {
  896. // JAVA EQUIVALENT:
  897. // Path old = new Path(oldPath);
  898. // Path new = new Path(newPath);
  899. // fs.rename(old, new);
  900. //Get the JNIEnv* corresponding to current thread
  901. JNIEnv* env = getJNIEnv();
  902. if (env == NULL) {
  903. errno = EINTERNAL;
  904. return -1;
  905. }
  906. jobject jFS = (jobject)fs;
  907. //Create objects of org.apache.hadoop.fs.Path
  908. jobject jOldPath = NULL;
  909. jobject jNewPath = NULL;
  910. jOldPath = constructNewObjectOfPath(env, oldPath);
  911. if (jOldPath == NULL) {
  912. return -1;
  913. }
  914. jNewPath = constructNewObjectOfPath(env, newPath);
  915. if (jNewPath == NULL) {
  916. destroyLocalReference(env, jOldPath);
  917. return -1;
  918. }
  919. //Rename the file
  920. jvalue jVal;
  921. jthrowable jExc = NULL;
  922. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS, "rename",
  923. JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_PATH), "Z"),
  924. jOldPath, jNewPath) != 0) {
  925. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  926. "FileSystem::rename");
  927. destroyLocalReference(env, jOldPath);
  928. destroyLocalReference(env, jNewPath);
  929. return -1;
  930. }
  931. //Delete unnecessary local references
  932. destroyLocalReference(env, jOldPath);
  933. destroyLocalReference(env, jNewPath);
  934. return (jVal.z) ? 0 : -1;
  935. }
  936. char* hdfsGetWorkingDirectory(hdfsFS fs, char* buffer, size_t bufferSize)
  937. {
  938. // JAVA EQUIVALENT:
  939. // Path p = fs.getWorkingDirectory();
  940. // return p.toString()
  941. //Get the JNIEnv* corresponding to current thread
  942. JNIEnv* env = getJNIEnv();
  943. if (env == NULL) {
  944. errno = EINTERNAL;
  945. return NULL;
  946. }
  947. jobject jFS = (jobject)fs;
  948. jobject jPath = NULL;
  949. jvalue jVal;
  950. jthrowable jExc = NULL;
  951. //FileSystem::getWorkingDirectory()
  952. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
  953. HADOOP_FS, "getWorkingDirectory",
  954. "()Lorg/apache/hadoop/fs/Path;") != 0 ||
  955. jVal.l == NULL) {
  956. errno = errnoFromException(jExc, env, "FileSystem::"
  957. "getWorkingDirectory");
  958. return NULL;
  959. }
  960. jPath = jVal.l;
  961. //Path::toString()
  962. jstring jPathString;
  963. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath,
  964. "org/apache/hadoop/fs/Path", "toString",
  965. "()Ljava/lang/String;") != 0) {
  966. errno = errnoFromException(jExc, env, "Path::toString");
  967. destroyLocalReference(env, jPath);
  968. return NULL;
  969. }
  970. jPathString = jVal.l;
  971. const char *jPathChars = (const char*)
  972. ((*env)->GetStringUTFChars(env, jPathString, NULL));
  973. //Copy to user-provided buffer
  974. strncpy(buffer, jPathChars, bufferSize);
  975. //Delete unnecessary local references
  976. (*env)->ReleaseStringUTFChars(env, jPathString, jPathChars);
  977. destroyLocalReference(env, jPathString);
  978. destroyLocalReference(env, jPath);
  979. return buffer;
  980. }
  981. int hdfsSetWorkingDirectory(hdfsFS fs, const char* path)
  982. {
  983. // JAVA EQUIVALENT:
  984. // fs.setWorkingDirectory(Path(path));
  985. //Get the JNIEnv* corresponding to current thread
  986. JNIEnv* env = getJNIEnv();
  987. if (env == NULL) {
  988. errno = EINTERNAL;
  989. return -1;
  990. }
  991. jobject jFS = (jobject)fs;
  992. int retval = 0;
  993. jthrowable jExc = NULL;
  994. //Create an object of org.apache.hadoop.fs.Path
  995. jobject jPath = constructNewObjectOfPath(env, path);
  996. if (jPath == NULL) {
  997. return -1;
  998. }
  999. //FileSystem::setWorkingDirectory()
  1000. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1001. "setWorkingDirectory",
  1002. "(Lorg/apache/hadoop/fs/Path;)V", jPath) != 0) {
  1003. errno = errnoFromException(jExc, env, "FileSystem::"
  1004. "setWorkingDirectory");
  1005. retval = -1;
  1006. }
  1007. //Delete unnecessary local references
  1008. destroyLocalReference(env, jPath);
  1009. return retval;
  1010. }
  1011. int hdfsCreateDirectory(hdfsFS fs, const char* path)
  1012. {
  1013. // JAVA EQUIVALENT:
  1014. // fs.mkdirs(new Path(path));
  1015. //Get the JNIEnv* corresponding to current thread
  1016. JNIEnv* env = getJNIEnv();
  1017. if (env == NULL) {
  1018. errno = EINTERNAL;
  1019. return -1;
  1020. }
  1021. jobject jFS = (jobject)fs;
  1022. //Create an object of org.apache.hadoop.fs.Path
  1023. jobject jPath = constructNewObjectOfPath(env, path);
  1024. if (jPath == NULL) {
  1025. return -1;
  1026. }
  1027. //Create the directory
  1028. jvalue jVal;
  1029. jVal.z = 0;
  1030. jthrowable jExc = NULL;
  1031. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1032. "mkdirs", "(Lorg/apache/hadoop/fs/Path;)Z",
  1033. jPath) != 0) {
  1034. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1035. "FileSystem::mkdirs");
  1036. goto done;
  1037. }
  1038. done:
  1039. //Delete unnecessary local references
  1040. destroyLocalReference(env, jPath);
  1041. return (jVal.z) ? 0 : -1;
  1042. }
  1043. int hdfsSetReplication(hdfsFS fs, const char* path, int16_t replication)
  1044. {
  1045. // JAVA EQUIVALENT:
  1046. // fs.setReplication(new Path(path), replication);
  1047. //Get the JNIEnv* corresponding to current thread
  1048. JNIEnv* env = getJNIEnv();
  1049. if (env == NULL) {
  1050. errno = EINTERNAL;
  1051. return -1;
  1052. }
  1053. jobject jFS = (jobject)fs;
  1054. //Create an object of org.apache.hadoop.fs.Path
  1055. jobject jPath = constructNewObjectOfPath(env, path);
  1056. if (jPath == NULL) {
  1057. return -1;
  1058. }
  1059. //Create the directory
  1060. jvalue jVal;
  1061. jthrowable jExc = NULL;
  1062. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1063. "setReplication", "(Lorg/apache/hadoop/fs/Path;S)Z",
  1064. jPath, replication) != 0) {
  1065. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1066. "FileSystem::setReplication");
  1067. goto done;
  1068. }
  1069. done:
  1070. //Delete unnecessary local references
  1071. destroyLocalReference(env, jPath);
  1072. return (jVal.z) ? 0 : -1;
  1073. }
  1074. int hdfsChown(hdfsFS fs, const char* path, const char *owner, const char *group)
  1075. {
  1076. // JAVA EQUIVALENT:
  1077. // fs.setOwner(path, owner, group)
  1078. //Get the JNIEnv* corresponding to current thread
  1079. JNIEnv* env = getJNIEnv();
  1080. if (env == NULL) {
  1081. errno = EINTERNAL;
  1082. return -1;
  1083. }
  1084. if (owner == NULL && group == NULL) {
  1085. fprintf(stderr, "Both owner and group cannot be null in chown");
  1086. errno = EINVAL;
  1087. return -1;
  1088. }
  1089. jobject jFS = (jobject)fs;
  1090. jobject jPath = constructNewObjectOfPath(env, path);
  1091. if (jPath == NULL) {
  1092. return -1;
  1093. }
  1094. jstring jOwnerString = (*env)->NewStringUTF(env, owner);
  1095. jstring jGroupString = (*env)->NewStringUTF(env, group);
  1096. //Create the directory
  1097. int ret = 0;
  1098. jthrowable jExc = NULL;
  1099. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1100. "setOwner", JMETHOD3(JPARAM(HADOOP_PATH), JPARAM(JAVA_STRING), JPARAM(JAVA_STRING), JAVA_VOID),
  1101. jPath, jOwnerString, jGroupString) != 0) {
  1102. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1103. "FileSystem::setOwner");
  1104. ret = -1;
  1105. goto done;
  1106. }
  1107. done:
  1108. destroyLocalReference(env, jPath);
  1109. destroyLocalReference(env, jOwnerString);
  1110. destroyLocalReference(env, jGroupString);
  1111. return ret;
  1112. }
  1113. int hdfsChmod(hdfsFS fs, const char* path, short mode)
  1114. {
  1115. // JAVA EQUIVALENT:
  1116. // fs.setPermission(path, FsPermission)
  1117. //Get the JNIEnv* corresponding to current thread
  1118. JNIEnv* env = getJNIEnv();
  1119. if (env == NULL) {
  1120. errno = EINTERNAL;
  1121. return -1;
  1122. }
  1123. jobject jFS = (jobject)fs;
  1124. // construct jPerm = FsPermission.createImmutable(short mode);
  1125. jshort jmode = mode;
  1126. jobject jPermObj =
  1127. constructNewObjectOfClass(env, NULL, HADOOP_FSPERM,"(S)V",jmode);
  1128. if (jPermObj == NULL) {
  1129. return -2;
  1130. }
  1131. //Create an object of org.apache.hadoop.fs.Path
  1132. jobject jPath = constructNewObjectOfPath(env, path);
  1133. if (jPath == NULL) {
  1134. destroyLocalReference(env, jPermObj);
  1135. return -3;
  1136. }
  1137. //Create the directory
  1138. int ret = 0;
  1139. jthrowable jExc = NULL;
  1140. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1141. "setPermission", JMETHOD2(JPARAM(HADOOP_PATH), JPARAM(HADOOP_FSPERM), JAVA_VOID),
  1142. jPath, jPermObj) != 0) {
  1143. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1144. "FileSystem::setPermission");
  1145. ret = -1;
  1146. goto done;
  1147. }
  1148. done:
  1149. destroyLocalReference(env, jPath);
  1150. destroyLocalReference(env, jPermObj);
  1151. return ret;
  1152. }
  1153. int hdfsUtime(hdfsFS fs, const char* path, tTime mtime, tTime atime)
  1154. {
  1155. // JAVA EQUIVALENT:
  1156. // fs.setTimes(src, mtime, atime)
  1157. //Get the JNIEnv* corresponding to current thread
  1158. JNIEnv* env = getJNIEnv();
  1159. if (env == NULL) {
  1160. errno = EINTERNAL;
  1161. return -1;
  1162. }
  1163. jobject jFS = (jobject)fs;
  1164. //Create an object of org.apache.hadoop.fs.Path
  1165. jobject jPath = constructNewObjectOfPath(env, path);
  1166. if (jPath == NULL) {
  1167. fprintf(stderr, "could not construct path object\n");
  1168. return -2;
  1169. }
  1170. jlong jmtime = mtime * (jlong)1000;
  1171. jlong jatime = atime * (jlong)1000;
  1172. int ret = 0;
  1173. jthrowable jExc = NULL;
  1174. if (invokeMethod(env, NULL, &jExc, INSTANCE, jFS, HADOOP_FS,
  1175. "setTimes", JMETHOD3(JPARAM(HADOOP_PATH), "J", "J", JAVA_VOID),
  1176. jPath, jmtime, jatime) != 0) {
  1177. fprintf(stderr, "call to setTime failed\n");
  1178. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1179. "FileSystem::setTimes");
  1180. ret = -1;
  1181. goto done;
  1182. }
  1183. done:
  1184. destroyLocalReference(env, jPath);
  1185. return ret;
  1186. }
  1187. char***
  1188. hdfsGetHosts(hdfsFS fs, const char* path, tOffset start, tOffset length)
  1189. {
  1190. // JAVA EQUIVALENT:
  1191. // fs.getFileBlockLoctions(new Path(path), start, length);
  1192. //Get the JNIEnv* corresponding to current thread
  1193. JNIEnv* env = getJNIEnv();
  1194. if (env == NULL) {
  1195. errno = EINTERNAL;
  1196. return NULL;
  1197. }
  1198. jobject jFS = (jobject)fs;
  1199. //Create an object of org.apache.hadoop.fs.Path
  1200. jobject jPath = constructNewObjectOfPath(env, path);
  1201. if (jPath == NULL) {
  1202. return NULL;
  1203. }
  1204. jvalue jFSVal;
  1205. jthrowable jFSExc = NULL;
  1206. if (invokeMethod(env, &jFSVal, &jFSExc, INSTANCE, jFS,
  1207. HADOOP_FS, "getFileStatus",
  1208. "(Lorg/apache/hadoop/fs/Path;)"
  1209. "Lorg/apache/hadoop/fs/FileStatus;",
  1210. jPath) != 0) {
  1211. errno = errnoFromException(jFSExc, env, "org.apache.hadoop.fs."
  1212. "FileSystem::getFileStatus");
  1213. destroyLocalReference(env, jPath);
  1214. return NULL;
  1215. }
  1216. jobject jFileStatus = jFSVal.l;
  1217. //org.apache.hadoop.fs.FileSystem::getFileBlockLocations
  1218. char*** blockHosts = NULL;
  1219. jobjectArray jBlockLocations;;
  1220. jvalue jVal;
  1221. jthrowable jExc = NULL;
  1222. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS,
  1223. HADOOP_FS, "getFileBlockLocations",
  1224. "(Lorg/apache/hadoop/fs/FileStatus;JJ)"
  1225. "[Lorg/apache/hadoop/fs/BlockLocation;",
  1226. jFileStatus, start, length) != 0) {
  1227. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1228. "FileSystem::getFileBlockLocations");
  1229. destroyLocalReference(env, jPath);
  1230. destroyLocalReference(env, jFileStatus);
  1231. return NULL;
  1232. }
  1233. jBlockLocations = jVal.l;
  1234. //Figure out no of entries in jBlockLocations
  1235. //Allocate memory and add NULL at the end
  1236. jsize jNumFileBlocks = (*env)->GetArrayLength(env, jBlockLocations);
  1237. blockHosts = malloc(sizeof(char**) * (jNumFileBlocks+1));
  1238. if (blockHosts == NULL) {
  1239. errno = ENOMEM;
  1240. goto done;
  1241. }
  1242. blockHosts[jNumFileBlocks] = NULL;
  1243. if (jNumFileBlocks == 0) {
  1244. errno = 0;
  1245. goto done;
  1246. }
  1247. //Now parse each block to get hostnames
  1248. int i = 0;
  1249. for (i=0; i < jNumFileBlocks; ++i) {
  1250. jobject jFileBlock =
  1251. (*env)->GetObjectArrayElement(env, jBlockLocations, i);
  1252. jvalue jVal;
  1253. jobjectArray jFileBlockHosts;
  1254. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFileBlock, HADOOP_BLK_LOC,
  1255. "getHosts", "()[Ljava/lang/String;") ||
  1256. jVal.l == NULL) {
  1257. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1258. "BlockLocation::getHosts");
  1259. destroyLocalReference(env, jPath);
  1260. destroyLocalReference(env, jFileStatus);
  1261. destroyLocalReference(env, jBlockLocations);
  1262. return NULL;
  1263. }
  1264. jFileBlockHosts = jVal.l;
  1265. //Figure out no of hosts in jFileBlockHosts
  1266. //Allocate memory and add NULL at the end
  1267. jsize jNumBlockHosts = (*env)->GetArrayLength(env, jFileBlockHosts);
  1268. blockHosts[i] = malloc(sizeof(char*) * (jNumBlockHosts+1));
  1269. if (blockHosts[i] == NULL) {
  1270. int x = 0;
  1271. for (x=0; x < i; ++x) {
  1272. free(blockHosts[x]);
  1273. }
  1274. free(blockHosts);
  1275. errno = ENOMEM;
  1276. goto done;
  1277. }
  1278. blockHosts[i][jNumBlockHosts] = NULL;
  1279. //Now parse each hostname
  1280. int j = 0;
  1281. const char *hostName;
  1282. for (j=0; j < jNumBlockHosts; ++j) {
  1283. jstring jHost =
  1284. (*env)->GetObjectArrayElement(env, jFileBlockHosts, j);
  1285. hostName =
  1286. (const char*)((*env)->GetStringUTFChars(env, jHost, NULL));
  1287. blockHosts[i][j] = strdup(hostName);
  1288. (*env)->ReleaseStringUTFChars(env, jHost, hostName);
  1289. destroyLocalReference(env, jHost);
  1290. }
  1291. destroyLocalReference(env, jFileBlockHosts);
  1292. }
  1293. done:
  1294. //Delete unnecessary local references
  1295. destroyLocalReference(env, jPath);
  1296. destroyLocalReference(env, jFileStatus);
  1297. destroyLocalReference(env, jBlockLocations);
  1298. return blockHosts;
  1299. }
  1300. void hdfsFreeHosts(char ***blockHosts)
  1301. {
  1302. int i, j;
  1303. for (i=0; blockHosts[i]; i++) {
  1304. for (j=0; blockHosts[i][j]; j++) {
  1305. free(blockHosts[i][j]);
  1306. }
  1307. free(blockHosts[i]);
  1308. }
  1309. free(blockHosts);
  1310. }
  1311. tOffset hdfsGetDefaultBlockSize(hdfsFS fs)
  1312. {
  1313. // JAVA EQUIVALENT:
  1314. // fs.getDefaultBlockSize();
  1315. //Get the JNIEnv* corresponding to current thread
  1316. JNIEnv* env = getJNIEnv();
  1317. if (env == NULL) {
  1318. errno = EINTERNAL;
  1319. return -1;
  1320. }
  1321. jobject jFS = (jobject)fs;
  1322. //FileSystem::getDefaultBlockSize()
  1323. tOffset blockSize = -1;
  1324. jvalue jVal;
  1325. jthrowable jExc = NULL;
  1326. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1327. "getDefaultBlockSize", "()J") != 0) {
  1328. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1329. "FileSystem::getDefaultBlockSize");
  1330. return -1;
  1331. }
  1332. blockSize = jVal.j;
  1333. return blockSize;
  1334. }
  1335. tOffset hdfsGetCapacity(hdfsFS fs)
  1336. {
  1337. // JAVA EQUIVALENT:
  1338. // fs.getRawCapacity();
  1339. //Get the JNIEnv* corresponding to current thread
  1340. JNIEnv* env = getJNIEnv();
  1341. if (env == NULL) {
  1342. errno = EINTERNAL;
  1343. return -1;
  1344. }
  1345. jobject jFS = (jobject)fs;
  1346. if (!((*env)->IsInstanceOf(env, jFS,
  1347. globalClassReference(HADOOP_DFS, env)))) {
  1348. fprintf(stderr, "hdfsGetCapacity works only on a "
  1349. "DistributedFileSystem!\n");
  1350. return -1;
  1351. }
  1352. //FileSystem::getRawCapacity()
  1353. jvalue jVal;
  1354. jthrowable jExc = NULL;
  1355. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
  1356. "getRawCapacity", "()J") != 0) {
  1357. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1358. "FileSystem::getRawCapacity");
  1359. return -1;
  1360. }
  1361. return jVal.j;
  1362. }
  1363. tOffset hdfsGetUsed(hdfsFS fs)
  1364. {
  1365. // JAVA EQUIVALENT:
  1366. // fs.getRawUsed();
  1367. //Get the JNIEnv* corresponding to current thread
  1368. JNIEnv* env = getJNIEnv();
  1369. if (env == NULL) {
  1370. errno = EINTERNAL;
  1371. return -1;
  1372. }
  1373. jobject jFS = (jobject)fs;
  1374. if (!((*env)->IsInstanceOf(env, jFS,
  1375. globalClassReference(HADOOP_DFS, env)))) {
  1376. fprintf(stderr, "hdfsGetUsed works only on a "
  1377. "DistributedFileSystem!\n");
  1378. return -1;
  1379. }
  1380. //FileSystem::getRawUsed()
  1381. jvalue jVal;
  1382. jthrowable jExc = NULL;
  1383. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS,
  1384. "getRawUsed", "()J") != 0) {
  1385. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1386. "FileSystem::getRawUsed");
  1387. return -1;
  1388. }
  1389. return jVal.j;
  1390. }
/**
 * Populate an hdfsFileInfo from an org.apache.hadoop.fs.FileStatus.
 *
 * Queries jStat field-by-field via JNI and fills fileInfo.  mName,
 * mOwner and mGroup are strdup'd heap copies owned by the caller
 * (released by hdfsFreeFileInfo).  Returns 0 on success, -1 with errno
 * set on any failed JNI call.
 *
 * NOTE(review): mSize is only assigned for regular files; for
 * directories it keeps whatever value fileInfo arrived with — callers
 * in this file calloc the struct first, so it reads as 0.  Presumably
 * that is the intended contract; verify against callers.
 * NOTE(review): strdup and GetStringUTFChars results are not checked
 * for NULL here — OOM would crash rather than fail cleanly.
 */
static int
getFileInfoFromStat(JNIEnv *env, jobject jStat, hdfsFileInfo *fileInfo)
{
jvalue jVal;
jthrowable jExc = NULL;
//FileStatus::isDir -> file vs. directory kind
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "isDir", "()Z") != 0) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
"FileStatus::isDir");
return -1;
}
fileInfo->mKind = jVal.z ? kObjectKindDirectory : kObjectKindFile;
//FileStatus::getReplication
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "getReplication", "()S") != 0) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
"FileStatus::getReplication");
return -1;
}
fileInfo->mReplication = jVal.s;
//FileStatus::getBlockSize
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "getBlockSize", "()J") != 0) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
"FileStatus::getBlockSize");
return -1;
}
fileInfo->mBlockSize = jVal.j;
//FileStatus::getModificationTime (Java milliseconds -> seconds)
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "getModificationTime", "()J") != 0) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
"FileStatus::getModificationTime");
return -1;
}
fileInfo->mLastMod = (tTime) (jVal.j / 1000);
//FileStatus::getAccessTime (Java milliseconds -> seconds)
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "getAccessTime", "()J") != 0) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
"FileStatus::getAccessTime");
return -1;
}
fileInfo->mLastAccess = (tTime) (jVal.j / 1000);
//FileStatus::getLen -- only meaningful for regular files
if (fileInfo->mKind == kObjectKindFile) {
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat,
HADOOP_STAT, "getLen", "()J") != 0) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
"FileStatus::getLen");
return -1;
}
fileInfo->mSize = jVal.j;
}
//FileStatus::getPath, then Path::toString, copied into mName
jobject jPath;
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
"getPath", "()Lorg/apache/hadoop/fs/Path;") ||
jVal.l == NULL) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
"Path::getPath");
return -1;
}
jPath = jVal.l;
jstring jPathName;
const char *cPathName;
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPath, HADOOP_PATH,
"toString", "()Ljava/lang/String;")) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
"Path::toString");
destroyLocalReference(env, jPath);
return -1;
}
jPathName = jVal.l;
cPathName = (const char*) ((*env)->GetStringUTFChars(env, jPathName, NULL));
//Caller owns this copy (freed in hdfsFreeFileInfo)
fileInfo->mName = strdup(cPathName);
(*env)->ReleaseStringUTFChars(env, jPathName, cPathName);
destroyLocalReference(env, jPath);
destroyLocalReference(env, jPathName);
//FileStatus::getOwner, copied into mOwner
jstring jUserName;
const char* cUserName;
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
"getOwner", "()Ljava/lang/String;")) {
fprintf(stderr, "Call to org.apache.hadoop.fs."
"FileStatus::getOwner failed!\n");
errno = EINTERNAL;
return -1;
}
jUserName = jVal.l;
cUserName = (const char*) ((*env)->GetStringUTFChars(env, jUserName, NULL));
fileInfo->mOwner = strdup(cUserName);
(*env)->ReleaseStringUTFChars(env, jUserName, cUserName);
destroyLocalReference(env, jUserName);
//FileStatus::getGroup, copied into mGroup
jstring jGroupName;
const char* cGroupName;
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
"getGroup", "()Ljava/lang/String;")) {
fprintf(stderr, "Call to org.apache.hadoop.fs."
"FileStatus::getGroup failed!\n");
errno = EINTERNAL;
return -1;
}
jGroupName = jVal.l;
cGroupName = (const char*) ((*env)->GetStringUTFChars(env, jGroupName, NULL));
fileInfo->mGroup = strdup(cGroupName);
(*env)->ReleaseStringUTFChars(env, jGroupName, cGroupName);
destroyLocalReference(env, jGroupName);
//FileStatus::getPermission().toShort() -> mPermissions
jobject jPermission;
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jStat, HADOOP_STAT,
"getPermission", "()Lorg/apache/hadoop/fs/permission/FsPermission;") ||
jVal.l == NULL) {
fprintf(stderr, "Call to org.apache.hadoop.fs."
"FileStatus::getPermission failed!\n");
errno = EINTERNAL;
return -1;
}
jPermission = jVal.l;
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jPermission, HADOOP_FSPERM,
"toShort", "()S") != 0) {
fprintf(stderr, "Call to org.apache.hadoop.fs.permission."
"FsPermission::toShort failed!\n");
errno = EINTERNAL;
return -1;
}
fileInfo->mPermissions = jVal.s;
destroyLocalReference(env, jPermission);
return 0;
}
  1513. static int
  1514. getFileInfo(JNIEnv *env, jobject jFS, jobject jPath, hdfsFileInfo *fileInfo)
  1515. {
  1516. // JAVA EQUIVALENT:
  1517. // fs.isDirectory(f)
  1518. // fs.getModificationTime()
  1519. // fs.getAccessTime()
  1520. // fs.getLength(f)
  1521. // f.getPath()
  1522. // f.getOwner()
  1523. // f.getGroup()
  1524. // f.getPermission().toShort()
  1525. jobject jStat;
  1526. jvalue jVal;
  1527. jthrowable jExc = NULL;
  1528. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1529. "exists", JMETHOD1(JPARAM(HADOOP_PATH), "Z"),
  1530. jPath) != 0) {
  1531. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1532. "FileSystem::exists");
  1533. return -1;
  1534. }
  1535. if (jVal.z == 0) {
  1536. errno = ENOENT;
  1537. return -1;
  1538. }
  1539. if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_FS,
  1540. "getFileStatus", JMETHOD1(JPARAM(HADOOP_PATH), JPARAM(HADOOP_STAT)),
  1541. jPath) != 0) {
  1542. errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
  1543. "FileSystem::getFileStatus");
  1544. return -1;
  1545. }
  1546. jStat = jVal.l;
  1547. int ret = getFileInfoFromStat(env, jStat, fileInfo);
  1548. destroyLocalReference(env, jStat);
  1549. return ret;
  1550. }
/**
 * List the directory at 'path'.
 *
 * Returns a calloc'd array of hdfsFileInfo (release with
 * hdfsFreeFileInfo) and stores its length in *numEntries.  Returns
 * NULL on error with errno set, and also NULL with *numEntries == 0
 * and errno == 0 for an empty directory.
 *
 * NOTE(review): on the early listStatus failure *numEntries is left
 * untouched -- callers must check the NULL return, not the count.
 * NOTE(review): the invokeMethod class label is HADOOP_DFS although
 * every other FileSystem call in this file uses HADOOP_FS; confirm
 * listStatus is intended to be DFS-only here.
 */
hdfsFileInfo* hdfsListDirectory(hdfsFS fs, const char* path, int *numEntries)
{
// JAVA EQUIVALENT:
// Path p(path);
// Path []pathList = fs.listPaths(p)
// foreach path in pathList
// getFileInfo(path)
//Get the JNIEnv* corresponding to current thread
JNIEnv* env = getJNIEnv();
if (env == NULL) {
errno = EINTERNAL;
return NULL;
}
jobject jFS = (jobject)fs;
//Create an object of org.apache.hadoop.fs.Path
jobject jPath = constructNewObjectOfPath(env, path);
if (jPath == NULL) {
return NULL;
}
hdfsFileInfo *pathList = 0;
jobjectArray jPathList = NULL;
jvalue jVal;
jthrowable jExc = NULL;
//FileSystem::listStatus
if (invokeMethod(env, &jVal, &jExc, INSTANCE, jFS, HADOOP_DFS, "listStatus",
JMETHOD1(JPARAM(HADOOP_PATH), JARRPARAM(HADOOP_STAT)),
jPath) != 0) {
errno = errnoFromException(jExc, env, "org.apache.hadoop.fs."
"FileSystem::listStatus");
destroyLocalReference(env, jPath);
return NULL;
}
jPathList = jVal.l;
//Figure out no of entries in that directory
jsize jPathListSize = (*env)->GetArrayLength(env, jPathList);
*numEntries = jPathListSize;
if (jPathListSize == 0) {
errno = 0;
goto done;
}
//Allocate memory -- calloc zeroes the structs, so a partially-filled
//array is safe to hand to hdfsFreeFileInfo below
pathList = calloc(jPathListSize, sizeof(hdfsFileInfo));
if (pathList == NULL) {
errno = ENOMEM;
goto done;
}
//Save path information in pathList
jsize i;
jobject tmpStat;
for (i=0; i < jPathListSize; ++i) {
tmpStat = (*env)->GetObjectArrayElement(env, jPathList, i);
//On a mid-loop failure, release everything built so far and
//return NULL (errno was set by getFileInfoFromStat)
if (getFileInfoFromStat(env, tmpStat, &pathList[i])) {
hdfsFreeFileInfo(pathList, jPathListSize);
destroyLocalReference(env, tmpStat);
pathList = NULL;
goto done;
}
destroyLocalReference(env, tmpStat);
}
done:
//Delete unnecessary local references
destroyLocalReference(env, jPath);
destroyLocalReference(env, jPathList);
return pathList;
}
  1615. hdfsFileInfo *hdfsGetPathInfo(hdfsFS fs, const char* path)
  1616. {
  1617. // JAVA EQUIVALENT:
  1618. // File f(path);
  1619. // fs.isDirectory(f)
  1620. // fs.lastModified() ??
  1621. // fs.getLength(f)
  1622. // f.getPath()
  1623. //Get the JNIEnv* corresponding to current thread
  1624. JNIEnv* env = getJNIEnv();
  1625. if (env == NULL) {
  1626. errno = EINTERNAL;
  1627. return NULL;
  1628. }
  1629. jobject jFS = (jobject)fs;
  1630. //Create an object of org.apache.hadoop.fs.Path
  1631. jobject jPath = constructNewObjectOfPath(env, path);
  1632. if (jPath == NULL) {
  1633. return NULL;
  1634. }
  1635. hdfsFileInfo *fileInfo = calloc(1, sizeof(hdfsFileInfo));
  1636. if (getFileInfo(env, jFS, jPath, fileInfo)) {
  1637. hdfsFreeFileInfo(fileInfo, 1);
  1638. fileInfo = NULL;
  1639. goto done;
  1640. }
  1641. done:
  1642. //Delete unnecessary local references
  1643. destroyLocalReference(env, jPath);
  1644. return fileInfo;
  1645. }
  1646. void hdfsFreeFileInfo(hdfsFileInfo *hdfsFileInfo, int numEntries)
  1647. {
  1648. //Free the mName
  1649. int i;
  1650. for (i=0; i < numEntries; ++i) {
  1651. if (hdfsFileInfo[i].mName) {
  1652. free(hdfsFileInfo[i].mName);
  1653. }
  1654. }
  1655. //Free entire block
  1656. free(hdfsFileInfo);
  1657. }
  1658. /**
  1659. * vim: ts=4: sw=4: et:
  1660. */