first commit

“17BaoH” 2021-05-29 14:37:54 +08:00
parent 98bd217e51
commit 9c4b5bd360
792 changed files with 290656 additions and 16 deletions

BIN  JCEC-doc.pdf  Normal file

Binary file not shown.

106  JCEC-fastdfs/.gitignore  vendored  Executable file

@@ -0,0 +1,106 @@
# Makefile.in
storage/Makefile
tracker/Makefile
client/test/Makefile
client/Makefile
# client/fdfs_link_library.sh.in
client/fdfs_link_library.sh
# Prerequisites
*.d
# Compiled Object files
*.slo
*.lo
*.o
*.obj
# Precompiled Headers
*.gch
*.pch
# Compiled Dynamic libraries
*.so
*.dylib
*.dSYM
*.dll
# Fortran module files
*.mod
*.smod
# Compiled Static libraries
*.lai
*.la
*.a
*.lib
# Executables
*.exe
*.out
*.app
client/fdfs_append_file
client/fdfs_appender_test
client/fdfs_appender_test1
client/fdfs_crc32
client/fdfs_delete_file
client/fdfs_download_file
client/fdfs_file_info
client/fdfs_monitor
client/fdfs_test
client/fdfs_test1
client/fdfs_upload_appender
client/fdfs_upload_file
client/fdfs_regenerate_filename
client/test/fdfs_monitor
client/test/fdfs_test
client/test/fdfs_test1
storage/fdfs_storaged
tracker/fdfs_trackerd
test/combine_result
test/100M
test/10M
test/1M
test/200K
test/50K
test/5K
test/gen_files
test/test_delete
test/test_download
test/test_upload
test/upload/
test/download/
test/delete/
# other
php_client/.deps
php_client/.libs/
php_client/Makefile
php_client/Makefile.fragments
php_client/Makefile.global
php_client/Makefile.objects
php_client/acinclude.m4
php_client/aclocal.m4
php_client/autom4te.cache/
php_client/build/
php_client/config.guess
php_client/config.h
php_client/config.h.in
php_client/config.log
php_client/config.nice
php_client/config.status
php_client/config.sub
php_client/configure
php_client/configure.ac
php_client/install-sh
php_client/libtool
php_client/ltmain.sh
php_client/missing
php_client/mkinstalldirs
php_client/run-tests.php
# fastdfs runtime paths
data/
logs/

5  JCEC-fastdfs/.vscode/settings.json  vendored  Executable file

@@ -0,0 +1,5 @@
{
"files.associations": {
"dfs_func.h": "c"
}
}

675  JCEC-fastdfs/COPYING-3_0.txt  Executable file

@@ -0,0 +1,675 @@
GNU GENERAL PUBLIC LICENSE
Version 3, 29 June 2007
Copyright (C) 2007 Free Software Foundation, Inc. <http://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
Preamble
The GNU General Public License is a free, copyleft license for
software and other kinds of works.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
the GNU General Public License is intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users. We, the Free Software Foundation, use the
GNU General Public License for most of our software; it applies also to
any other work released this way by its authors. You can apply it to
your programs, too.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
To protect your rights, we need to prevent others from denying you
these rights or asking you to surrender the rights. Therefore, you have
certain responsibilities if you distribute copies of the software, or if
you modify it: responsibilities to respect the freedom of others.
For example, if you distribute copies of such a program, whether
gratis or for a fee, you must pass on to the recipients the same
freedoms that you received. You must make sure that they, too, receive
or can get the source code. And you must show them these terms so they
know their rights.
Developers that use the GNU GPL protect your rights with two steps:
(1) assert copyright on the software, and (2) offer you this License
giving you legal permission to copy, distribute and/or modify it.
For the developers' and authors' protection, the GPL clearly explains
that there is no warranty for this free software. For both users' and
authors' sake, the GPL requires that modified versions be marked as
changed, so that their problems will not be attributed erroneously to
authors of previous versions.
Some devices are designed to deny users access to install or run
modified versions of the software inside them, although the manufacturer
can do so. This is fundamentally incompatible with the aim of
protecting users' freedom to change the software. The systematic
pattern of such abuse occurs in the area of products for individuals to
use, which is precisely where it is most unacceptable. Therefore, we
have designed this version of the GPL to prohibit the practice for those
products. If such problems arise substantially in other domains, we
stand ready to extend this provision to those domains in future versions
of the GPL, as needed to protect the freedom of users.
Finally, every program is threatened constantly by software patents.
States should not allow patents to restrict development and use of
software on general-purpose computers, but in those that do, we wish to
avoid the special danger that patents applied to a free program could
make it effectively proprietary. To prevent this, the GPL assures that
patents cannot be used to render the program non-free.
The precise terms and conditions for copying, distribution and
modification follow.
TERMS AND CONDITIONS
0. Definitions.
"This License" refers to version 3 of the GNU General Public License.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Use with the GNU Affero General Public License.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU Affero General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the special requirements of the GNU Affero General Public License,
section 13, concerning interaction through a network will apply to the
combination as such.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU General Public License from time to time. Such new versions will
be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If the program does terminal interaction, make it output a short
notice like this when it starts in an interactive mode:
<program> Copyright (C) <year> <name of author>
This program comes with ABSOLUTELY NO WARRANTY; for details type `show w'.
This is free software, and you are welcome to redistribute it
under certain conditions; type `show c' for details.
The hypothetical commands `show w' and `show c' should show the appropriate
parts of the General Public License. Of course, your program's commands
might be different; for a GUI interface, you would use an "about box".
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU GPL, see
<http://www.gnu.org/licenses/>.
The GNU General Public License does not permit incorporating your program
into proprietary programs. If your program is a subroutine library, you
may consider it more useful to permit linking proprietary applications with
the library. If this is what you want to do, use the GNU Lesser General
Public License instead of this License. But first, please read
<http://www.gnu.org/philosophy/why-not-lgpl.html>.

818  JCEC-fastdfs/HISTORY  Executable file

@@ -0,0 +1,818 @@
Version 6.06 2019-12-30
* bugfixed: fdfs_storaged can't quit normally
* bugfixed: init/memset return ip address to ascii 0 for Java SDK
Version 6.05 2019-12-25
* fdfs_trackerd and fdfs_storaged print the server version in usage.
you can execute fdfs_trackerd or fdfs_storaged without parameters
to show the server version
* trunk server support compress the trunk binlog periodically,
the config items in tracker.conf: trunk_compress_binlog_interval
and trunk_compress_binlog_time_base
* trunk binlog compression support transaction
* support backup binlog file when truncate trunk binlog,
the config item in tracker.conf: trunk_binlog_max_backups
* support alignment size for trunk space allocation
the config item in tracker.conf: trunk_alloc_alignment_size
* support merge free trunk spaces
the config item in tracker.conf: trunk_free_space_merge
* support delete unused trunk files
the config item in tracker.conf: delete_unused_trunk_files
* fdfs_monitor.c: do NOT call getHostnameByIp
NOTE: you MUST upgrade libfastcommon to V1.43 or later
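As a rough sketch of how the trunk-related items above could be set in tracker.conf (the item names come from this entry; the values and comments are illustrative placeholders, not verified defaults):
  # compress the trunk binlog once per interval, starting at the time base
  trunk_compress_binlog_interval = 86400
  trunk_compress_binlog_time_base = 03:00
  # backups kept when the trunk binlog is truncated
  trunk_binlog_max_backups = 7
  # alignment size (bytes) for trunk space allocation
  trunk_alloc_alignment_size = 256
  # merge contiguous free trunk spaces
  trunk_free_space_merge = true
  # delete trunk files that are no longer used
  delete_unused_trunk_files = false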
Version 6.04 2019-12-05
* storage_report_ip_changed ignore result EEXIST
* use get_gzip_command_filename from libfastcommon v1.42
* support compress error log and access log
* disk recovery support multi-threads to speed up
* bugfix: should use memset to init pReader in function
storage_reader_init, this bug is caused by v6.01
NOTE: you MUST upgrade libfastcommon to V1.42 or later
Version 6.03 2019-11-20
* dual IPs support two different types of inner (intranet) IPs
* storage server requests the tracker server to change its status
to that of the tracker leader when the storage server finds
its own status inconsistent
* bugfix: fdfs_monitor correctly gets the index of the specified tracker server
* storage server write to data_init_flag and mark file safely
(write to temp file then rename)
* code refine: combine g_fdfs_store_paths and g_path_space_list,
and extend struct FDFSStorePathInfo
* check store path's mark file to prevent confusion
* new selected tracker leader do NOT notify self by network
* use a larger network_timeout for fetching one-store-path binlog
during disk recovery
NOTE: the tracker and storage server must upgrade together
Version 6.02 2019-11-12
* get_file_info calculate CRC32 for appender file type
* disk recovery download file to local temp file then rename it
when the local file exists
* support regenerate filename for appender file
NOTE: the regenerated file will be a normal file!
Version 6.01 2019-10-25
* compress and uncompress binlog file by gzip when needed,
config items in storage.conf: compress_binlog and compress_binlog_time
* bugfix: must check and create data path before write_to_pid_file
in fdfs_storaged.c
Version 6.00 2019-10-16
* tracker and storage server support dual IPs
1. you can config dual tracker IPs in storage.conf and client.conf,
the configuration item name is "tracker_server"
2. you can config dual storage IPs in storage_ids.conf
more detail please see the config files.
NOTE: you MUST upgrade libfastcommon to V1.41 or later
the tracker and storage server must upgrade together
* storage server get IP from tracker server
* storage server report current tracker IP to the tracker server when join
* tracker server check tracker list when storage server join
* use socketCreateExAuto and socketClientExAuto exported by libfastcommon
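For illustration, the dual-IP configuration described above might look like this (the comma-separated inner/outer IP syntax and the addresses are assumptions; the config files shipped with this release are authoritative):
  # storage.conf / client.conf: inner and outer tracker IPs before the port
  tracker_server = 10.0.1.10,203.0.113.10:22122
  # storage_ids.conf: <server id> <group name> <inner ip,outer ip>
  100001  group1  10.0.1.21,203.0.113.21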
Version 5.12 2018-06-07
* code refine for rare case
* replace print format OFF_PRINTF_FORMAT to PRId64
* php_ext fix zend_object_store_get_object call in php5.5
* make.sh uses macros defined in /usr/include/fastcommon/_os_define.h
* correct CRC32, you must upgrade libfastcommon to V1.38 or later
Version 5.11 2017-05-26
* bug fixed: file_offset has no effect when use trunk file
* add storage access log header
* http.conf add parameter http.multi_range.enabed
Version 5.10 2017-03-29
* use fc_safe_read instead of read, and fc_safe_write instead of write
you must upgrade libfastcommon to V1.35 or later
* fix getFileContentEx read bytes,
you must upgrade libfastcommon to V1.36 or later
* do NOT sync storage server info to tracker leader
* adjust parameter store_server when use_trunk_file is true
* clear sync src id when tracker response ENOENT
* log more info when fdfs_recv_header / fdfs_recv_response fail
Version 5.09 2016-12-29
* bug fixed: list_all_groups expands the buffer automatically when there are many groups
* tracker.conf add parameters: min_buff_size and max_buff_size
* php extension fix free object bug in PHP 7
Version 5.08 2016-04-08
* install library to $(TARGET_PREFIX)/lib anyway
* php extension compiled in PHP 7
* dio thread use blocked_queue and php extension use php7_ext_wrapper.h,
you must upgrade libfastcommon to V1.25 or later
* remove common/linux_stack_trace.[hc]
Version 5.07 2015-09-13
* schedule task add the "second" field
* make.sh changed, you must upgrade libfastcommon to V1.21 or later
* bug fixed: storage_disk_recovery.c skip the first file (binlog first line)
* bug fixed: should close connection after fetch binlog
* fdfs_storaged.c: advance the position of daemon_init
* set log rotate time format
* bug fixed: must check store_path_index
Version 5.06 2015-05-12
* compile passed in mac OS Darwin
* correct scripts in subdir init.d
* check item thread_stack_size in storage.conf, you must
upgrade libfastcommon to V1.14 or later
Version 5.05 2014-11-22
* tracker_mem.c log more info
* remove useless global variable: g_network_tv
* storage can fetch it's group_name from tracker server
Version 5.04 2014-09-16
* add fastdfs.spec for build RPM on Linux
* depend on libfastcommon
* in multi tracker servers case, when receive higher status like
online / active and the storage status is wait_sync or syncing,
the tracker adjust storage status to newer, and the storage rejoin
to the tracker server
* fdfs_monitor support delete empty group
* bug fixed: two tracker leaders occur in rare case
* add connection stats
* delete old log files, add parameter: log_file_keep_days
Version 5.03 2014-08-10
* network send and recv retry when error EINTR happen
* support mac OS Darwin
* use newest logger from libfastcommon
* patches by liangry@ucweb.com
* bug fixed: can't sync large files caused by v5.02
* use newest files from libfastcommon
* change TRACKER_SYNC_STATUS_FILE_INTERVAL from 3600 to 300
* socket send and recv ignore errno EINTR
Version 5.02 2014-07-20
* correct README spelling mistake
* bug fixed: can't handle the sync truncate file exception
* remove tracker_global.c extern keyword to tracker_global.h
* change log level from ERROR to DEBUG when IOEVENT_ERROR
* php callback should use INIT_ZVAL to init zval variable
* add function short2buff and buff2short
* add get_url_content_ex to support buffer passed by caller
* logger can set rotate time format
* logger can log header line
* #include <stdbool.h> to use C99 bool
* logger can delete old rotated files
* bug fixed: connection pool should NOT increase counter when connect fail
* logger.c do NOT call fsync after write
Version 5.01 2014-02-02
* trunk binlog be compressed when trunk init
* bug fixed: sync trunk binlog file to other storage servers immediately when
the trunk server init done
* move ioevent_loop.[hc] and fast_task_queue.[hc] from tracker/ to common/
* hash table support locks
* hash table support new functions: hash_inc and hash_inc_ex
Version 5.00 2013-12-23
* discard libevent, use epoll in Linux, kqueue in FreeBSD, port in SunOS directly
* do_notify_leader_changed force close connection when target is myself
* modify the INSTALL file and tracker/Makefile.in
Version 4.08 2013-11-30
* bug fixed: FDFS_DOWNLOAD_SERVER_ROUND_ROBIN change to FDFS_STORE_SERVER_ROUND_ROBIN
* dio_init use memset to init buffer
* disable linger setting (setsockopt with option SO_LINGER)
* change log level from error to warning when file not exist on storage server
Version 4.07 2013-06-02
* make.sh add -lpthread by ldconfig check
* support multi accept threads
* tracker and storage server close client connection when recv invalid package
* client/storage_client.c: file_exist with silence flag
* tracker and storage process support start, stop and restart command
* tracker/tracker_proto.c fdfs_recv_header: logDebug change to logError
Version 4.06 2013-01-24
* fdfs_upload_file tool enhancement
* fdfs_download_file tool support offset and download size
* trunk file upload support sub paths rotating correctly
* add function: fdfs_http_get_file_extension
* sync truncate file operation anyway
Version 4.05 2012-12-30
* client/fdfs_upload_file.c can specify storage ip port and store path index
* add connection pool
* client load storage ids config
* common/ini_file_reader.c does NOT call chdir
* keep the mtime of file same
* use g_current_time instead of call time function
* remove embed HTTP support
Version 4.04 2012-12-02
* bug fixed: get storage server id when storage daemon init
* storage id in filename use global variable
* dynamic alloc memory 8 bytes alignment
* fast_task_queue support memory pool chain
Version 4.03 2012-11-18
* trunk_mgr/trunk_mem.c: log error and add more debug info
* file id generated by storage server can include storage server ID
Version 4.02 2012-10-30
* validate file_ext_name and prefix_name when upload file
* storage.conf add parameter: file_sync_skip_invalid_record
* add offset debug info when sync file fail
* bug fixed: log to binlog also if the file exists when sync file
* tracker and storage error log support rotate
* support rotate log by file size
* rotate log when receive HUP signal
* fdfs_monitor support set trunk server
* bug fixed: tracker_mem.c correct double mutex lock
Version 4.01 2012-10-21
* trunk_mgr/trunk_mem.c: trunk init flag check more strictly
* file signature for checking file duplicate support MD5
* slave file support both symbol link and direct file
* tracker server log trunk server change logs
Version 4.00 2012-10-06
* identify storage server by ID instead of IP address
* tracker.conf: storage reserved space can use ratio such as 10%
* storage server support access log
* appender file and trunk file also use rand number in file id
* bug fixed: test_upload.c: char file_id[64] change to: char file_id[128]
* set pipe reading fd with attribute O_NOATIME
* bug fixed: correct php extension call_user_function TSRMLS_DC with TSRMLS_CC
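A minimal sketch of the ratio-based reserved space mentioned above, assuming the tracker.conf item is named reserved_storage_space (the value is illustrative):
  # stop storing new files on a store path when its free space
  # reaches or drops below this ratio of total disk space
  reserved_storage_space = 10%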
Version 3.11 2012-08-04
* setsockopt set linger.l_linger to micro-seconds in FreeBSD and seconds
in others
* trunk binlog reader skip incorrect records
* bug fixed: single disk recovery support symbol link and trunk file
* storage generate filename enhancement
* ETIME change to ETIMEDOUT for FreeBSD
* tracker_mem.c: load storage server ignore empty ip address
Version 3.10 2012-07-22
* check and init trunk file more gracefully
* remove unused-but-set-variable
* bug fixed: return correct group name when g_check_file_duplicate is true
* bug fixed: php extension call_user_function replace TSRMLS_CC with TSRMLS_DC
* enlarge the interval of tracker re-selecting the trunk server
* trunk free block check duplicate using avl tree
* trunk file sync overwrite the dest file anyway
* common/avl_tree.c: free data when delete
* tracker.conf add parameter: trunk_init_reload_from_binlog, when this flag
is set to true, load all free trunk blocks from the trunk binlog
* trunk status control only by trunk_mem.c and memcmp struct FDFSTrunkFullInfo
avoid memory alignment problem
* auto remove the too old temp file
Version 3.09 2012-07-08
* make.sh avoid override config files of /etc/fdfs/
* common/logger.c: function log_init can be called more than once
* php extension logInfo change to logDebug
* c client logInfo change to logDebug
* storage_dio.c log info more properly
* delete the trunk space which is occupied
* tracker.conf add parameter: trunk_init_check_occupying, when this flag
is set to true, do not add the trunk nodes which are occupied
* another method to get local ip addresses
Version 3.08 2012-05-27
* FAST_MAX_LOCAL_IP_ADDRS change from 4 to 16
* appender file support modify
* appender file support truncate
Version 3.07 2012-05-13
* tracker/tracker_mem.c: check storage ip address is not empty
* remove direct IO support
* trunk binlog sync optimization
* php extension compile passed in PHP 5.4.0
* get local ip addresses enhancement
* trunk server select the storage server whose binlog file size is the largest
* sync trunk binlog file correctly when trunk server changed
Version 3.06 2012-01-22
* add common/avl_tree.h and common/avl_tree.c
* organize trunk free blocks using AVL tree
* find the trunk server for each group when current tracker be a leader
* common/sched_thread.c can add schedule entries dynamically
* support creating trunk files in advance
Version 3.05 2011-12-20
* remove compile warnings
* storage server's store_path_count can be more than that of group
* bug fixed: common/fast_mblock.c malloc bytes are not enough
* make.sh support OS: HP-UX
Version 3.04 2011-11-25
* bug fixed: duplicate files correctly save only one entry in trunk file mode
* bug fixed: sync correctly with more binlog files
* fdfs_file_info query file info from storage server
* bug fixed: php extension compile error using gcc 4.6.1 as:
variable 'store_path_index' set but not used
* bug fixed: delete the metadata of trunked file correctly
* bug fixed: append file ok when check duplicate is on
* storage/trunk_mgr/trunk_shared.[hc]: trunk_file_stat_func do not
use function pointer
* bug fixed: storage/trunk_mgr/trunk_shared.c base64_decode_auto
overflow 1 byte
* bug fixed: delete slave file correctly
* bug fixed: remove debug info
* md5 function name changed to avoid conflict
Version 3.03 2011-10-16
* ignore existed link when sync link file
* http token checking support persistent token
* add functions: storage_file_exist and storage_file_exist1
* php minfo add fastdfs version info
* make.sh changed
* client move libevent dependency
Version 3.02 2011-09-18
* bug fixed: tracker_mem_check_add_tracker_servers add tracker server
correctly
* php client compile ok with php 5.2.17
* re-select trunk server ok
Version 3.01 2011-07-31
* bug fixed: tracker_get_connection_ex and tracker_get_connection_r_ex
connect two times with multi tracker servers
* bug fixed: tracker_mem_check_add_tracker_servers condition not correct
* all logError add source filename and line
* php extension support upload file callback
* php extension support download file callback
Version 3.00 2011-06-19
* mass small files optimization
* add fixed block memory pool: common/fast_mblock.c
* bug fixed: tracker_mem.c do NOT clear g_groups fields
* bug fixed: slave file and appender file download ok
* bug fixed: tracker / storage run by group / user, set file owner
* tracker server support leader
* client support static library
* client_func.h add functions fdfs_tracker_group_equals and
fdfs_get_file_ext_name
* bug fixed: test/dfs_func_pc.c compile ok
* storage server check free space enough when upload a file
Version 2.09 2011-02-19
* bug fixed: write_to_binlog_index then increase g_binlog_index (feedback
by koolcoy)
* disk read / write supports direct mode (avoid caching by the file system)
Version 2.08 2011-01-30
* bug fixed: fdfs_trackerd.c set g_tracker_thread_count to 0
* add cmd TRACKER_PROTO_CMD_SERVER_LIST_ONE_GROUP to support list one group
* support disk recovery automatically
* support total_upload_bytes, success_upload_bytes, total_download_bytes and
success_download_bytes etc. 18 stat fields
* tracker data file storage_groups.dat changes to storage_groups_new.dat, and
storage_servers.dat changes to storage_servers_new.dat
* support file append, add tests: fdfs_appender_test and fdfs_appender_test1
* storage_dio.c: dio_deal_task split to several functions
* tracker http check thread exit normally
* function fdfs_get_file_info_ex changed, add function fdfs_get_file_info_ex1
* fix some type cast error when compile with c++
* client add tools: fdfs_upload_appender and fdfs_append_file
Version 2.07 2011-01-09
* slave file's prefix name can be empty
* FDFS_MAX_GROUPS change from 64 to 512
* file size field in the file id changed: high 32 bits is random integer
when the file size < 2GB and the highest bit set to 1
* tracker_service.c: in function list_group_storages, use strcpy
instead of memcpy
* php extension add function fastdfs_tracker_delete_storage
* client add tool: fdfs_file_info to get file info, including file size,
create timestamp, source storage ip address and crc32 signature
* fdfs_upload_file.c: omit more error info when the local file not exist
Version 2.06 2010-12-26
* sync file op: do not sync the file which exists on the dest storage server
and whose file size is the same
* bug fixed: sync copy file will clear the existed file on dest storage
server (truncate the file size to 0), this bug caused by V2.04
* bug fixed: making temp files discards the system function mkstemp;
use file sequence No. with pthread_mutex_lock instead
* bug fixed: function fastdfs_tracker_list_groups, when parameter group_name
is null or empty string, return all groups info
* bug fixed: uploading a file exceeding 2GB will fail
* bug fixed: tracker to tracker sync system data files, in function:
tracker_mem_get_tracker_server, pTrackerStatus not be set properly
Version 2.05 2010-12-05
* client/fdfs_monitor.c: add sync delay time
* tracker/fast_task_queue.c: pTask->data = pTask->arg + arg_size;
change to: pTask->data = (char *)pTask->arg + arg_size;
* bug fixed: storage_sync.c line 237 cause core dump in Ubuntu 10.04
* upload file test use mmap, support more test_upload processes
* client add three tools: fdfs_upload_file, fdfs_download_file and
fdfs_delete_file
Version 2.04 2010-11-19
* storage.conf: tracker server ip can NOT be 127.0.0.1
* do not catch signal SIGABRT
* strerror change to STRERROR macro
* sync copy file use temp filename first, rename to the correct filename
when sync done
* file id use 4 bytes CRC32 signature instead of random number
* add file: client/fdfs_crc32.c
* one of file hash code signature function change from APHash_ex
to simple_hash_ex
* bug fixed: when fdfs_storaged quit, maybe write to binlog file fail,
the error info is "Bad file descriptor"
Version 2.03 2010-11-08
* bug fixed: core dump when http.need_find_content_type=false and
http.anti_steal.check_token=true
* storage server add join_time field (create timestamp of this storage)
* tracker server fetch system files from other tracker server when
first storage server join in (tracker to tracker sync system files)
* tracker server changes the old ip address to the new address when the
storage server ip address changed
* tracker to tracker sync system data files in some case, multi tracker
server supported well
Version 2.02 2010-10-28
* get parameters function from tracker server changed,
add parameter: storage_sync_file_max_delay
* local ip functions move to common/local_ip_func.c
* when query all storage servers to store, do not increase the current
write server index
* struct FDFSHTTPParams add field: need_find_content_type
* symbol link client library to /usr/lib64 in 64 bits OS
* storage_client.c: deal file extension name correctly
Version 2.01 2010-10-17
* client/fdfs_monitor.c can specify tracker server
* macro STORAGE_STORE_PATH_PREFIX_CHAR change to
FDFS_STORAGE_STORE_PATH_PREFIX_CHAR
* php extension can set log filename
* php extension add function: fastdfs_client_version
* bug fixed: client/tracker_client.c tracker_get_connection_ex NULL pointer
* set max core dump file size to at least 256MB when DEBUG_FLAG is on,
make sure to generate core file when core dump with DEBUG_FLAG on
* upload file can get available storage server list of the group,
add command TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITHOUT_GROUP_ALL and
TRACKER_PROTO_CMD_SERVICE_QUERY_STORE_WITH_GROUP_ALL
* bug fixed: storage core dump in some case
Version 2.00 2010-08-22
* tracker network io use libevent instead of traditional io model
* storage network io use libevent instead of traditional io model
* storage disk read/write use separate threads
* tracker_mem.c malloc single group and storage struct, remove referer
* make install copy config files
* tracker.conf add two parameters: storage_sync_file_max_delay and
storage_sync_file_max_time
* client tracker_get_connection increase server_index correctly
* storage sync to storage server adds active test
* test programs compile ok
Version 1.29 2010-06-30
* add files: tracker_dump.h and tracker_dump.c, tracker dump global vars
* add files: storage_dump.h and storage_dump.c, storage dump global vars
* sockopt.c: tcprecvfile and tcpdiscard add parameter total_recv_bytes
* storage server add fields: storage_port and storage_http_port
* auto rename synced remark files when the port of all storage servers
in a group changed to another port
* connect server support timeout, adding connect_timeout parameter in
config file
* log_init set log to cache to false (no cache)
Version 1.28 2010-05-30
* tracker_servive.c: set current_write_group anyway when current group
out of space
* logger support context (multi instance)
* get storage servers by filename: if the file was created more than one day ago
  (the create timestamp of the file < current_time - 86400), any active storage server matches
* add files: common/pthread_func.h and common/pthread_func.c
* common/sched_thread.h, remove statement: extern bool g_continue_flag;
* client add libfastcommon
* global variables: g_base_path, g_network_timeout, g_version change to
g_fdfs_base_path, g_fdfs_network_timeout, g_fdfs_version
* common/fdfs_base64.h/c change name to common/base64.h/c
* make.sh use TARGET_PREFIX instead of TARGET_PATH
* protocol add ACTIVE_TEST, tracker and storage both support
* php client, bug fixed: fastdfs_connect_server, the sock must init to -1
* bug fixed: storage status not correct with multi tracker servers
* sync storage mark file and stat file to disk properly
Version 1.27 2010-04-10
* storage.conf: add if_alias_prefix parameter to get the ip address of the
local host
* storage http support domain name
* php extension add some parameters in fastdfs_client.ini
* make.sh compile use debug mode
* type off_t change to int64_t
* redirect stdout and stderr to log file
* php extension list_groups add fields: version and http_domain
Version 1.26 2010-02-28
* remove compile warning of logError
* ini reader support section
* bug fixed: tracker/tracker_mem.c sync storage server status
* use storage server http server port anyway
* bug fixed: ini reader can support relative config filename
* function enhancement: tracker server can check storage HTTP server alive
Version 1.25 2010-02-04
* storage_sync.c if source file not exist when sync a file, change from
logWarning to logDebug
* filename buff size change from 64 to 128
* bug fixed: c client and php client, log not inited cause core dump when
call log functions
* can print stack trace when process core dumped in Linux server
* bug fixed: tracker/tracker_mem.c load storage servers fail with many groups
and storage servers
* common/sockopt.c remove debug info
* storage stat add fields: version
* auto adjust when storage server ip address changed
* bug fixed: when add a new storage server, other storage servers' status keep
the same, not changed
* add macros, compile passed in cygwin, thanks Seapeak
* write to system data file using lock
* common/ini_file_reader.c: use one context parameter, not two parameters
* storage status sync modified (the code of tracker and storage both changed)
* when receiving a kill signal, worker threads quit more quickly; the daemon
  processes fdfs_trackerd and fdfs_storaged exit promptly on a kill signal
* remove compile warning info of logError
* tracker server start more quickly with many groups and storage servers
* bug fixed: correct off_t printf format
Version 1.24 2010-01-06
* call php_fdfs_close with TSRMLS_CC as php_fdfs_close(i_obj TSRMLS_CC)
* storage server to storage server report ip address as tracker client
* bug fixed: sendfile exceeds 2GB file in Linux
* bug fixed: delete storage server
* storage stat add fields: up_time and src_ip_addr
* big static or struct member char array buffers changed to malloc in order to
  decrease stack size
* FDFS_WRITE_BUFF_SIZE change from 512KB to 256KB
* bug fixed: client/storage_client.c, meta data miss when upload file
* decrease thread_stack_size default value in config files: tracker.conf
and storage.conf
Version 1.23 2009-11-29
* remove unused variable "sleep_secs" in tracker_report_thread_entrance
* storage can bind an address when connect to other servers (as a client)
* common/md5.h fix UINT4 typedef wrong type in 64 bit OS
* client/fdfs_test.c: print the source ip address decoded from the remote
filename
* client add function fdfs_get_file_info
* php extension add functions: fastdfs_http_gen_token and fastdfs_get_file_info
* server process will exit when the http service starts fail
* support file group, a master file with many slave files whose file id can be
combined from master file id and prefix
* php client support uploading slave file
* ip address in filename change from host byte order to network byte order
* storage sync performance enhancement, using a 64KB read buffer to avoid
  reading the binlog file repeatedly
* storage add prototol cmd: STORAGE_PROTO_CMD_QUERY_FILE_INFO
* FDFS_FILE_EXT_NAME_MAX_LEN changed from 5 to 6
* get file info support slave file
* storage server for uploading file support priority
Version 1.22 2009-10-12
* bug fixed: common/shared_func.c allow_hosts array maybe overflow in some case
* tracker/tracker_mem.c: protocol TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL,
return at least a storage server when active storage
server count of the group > 0
* bug fixed: when client connection disconnected, always log debug or error info
* make.sh: default not install FastDFS services in Linux server
* common/sockopt.c: setsockopt level SOL_TCP only supported in Linux
* common/http_func.c: do not use function strsep because strsep is not portable
* client upload file support callback function
* client support multi tracker groups (multi FastDFS clusters)
* bug fixed: thread_stack_size not correct when the param thread_stack_size
not set in the config file
* supply php extension (directory name: php_client)
* c client reconnect server (tracker or storage) when network IO error
* c client: make the tracker server index counter thread safe
Version 1.21 2009-09-19
* bug fixed: when the source storage server finished syncing files to a new storage server,
  its status changed to ONLINE (should keep as ACTIVE, report by zhouzezhong)
* add thread_stack_size in config file, default value is 1MB (report by chhxo)
* tracker and storage server use setsockopt to keep alive
(report by zhouzezhong)
* bug fixed: storage server with multi-path, upload file fail when the free
space of each path <= reserved space (the total free space > reserved space,
report by zhouzezhong)
* storage_sync.c: when connect fails, do not change the dest storage server's
  status to offline
* tracker_service.c and storage_service.c change log level from WARNING to DEBUG
when client connection disconnected (report by Jney402)
* bug fixed: tracker_client.c correct store_path_index return by tracker server
(report by happy_fastdfs)
* bug fixed: tracker_service.c when store_lookup set to 2 (load balance), use
another pthread lock to avoid long time lock waiting
(report by happy_fastdfs)
* add service shell scripts in directory: init.d
(services will auto installed on Linux, report by hugwww)
Version 1.20 2009-09-05
* base64 use context, functions changed
* common/ini_file_reader.c: fix memory leak
* tracker server support HTTP protocol, one thread mode
* storage server support HTTP protocol, one thread mode
* fix bug: storage server rebuild, auto sync data correctly
* fix bug: sync data fail (correct storage server status)
* when storage server idle time exceeds check_active_interval seconds,
  set its status to offline
* make tracker counters thread safe
Version 1.19 2009-07-23
* use poll instead of select in sockopt.c
* hash.c use chain impl by self
* use FastDHT 1.09 client code
* ini reader support HTTP protocol, conf file can be an url
* correct test dir compile error
* use non-block socket to increase network IO performance
* add cmd TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ALL: query all storage servers
  from which the file can be downloaded
* while (1) ... break; changed to do ... while (0);
Version 1.18 2009-05-24
* restart.sh only kills the programs matching the program name and all parameters
* correct get local ip addresses
* common files do not use global vars like g_network_timeout and g_base_path
* download file support offset and download bytes
* hash function change type from unsigned int to signed int
* file size in file name supports 64 bits; the old field length is 4 bytes, the new length is 8 bytes
Version 1.17 2009-03-19
* add test programs at sub directory test/
* common/shared_func.c: rindex change to strrchr, add #include <netinet/in.h>
* support SunOS (Solaris), compile passed on SunOS 5.10
* support AIX, compile passed on AIX 5.3
* sys call statfs change to statvfs
* use scheduling thread to sync binlog buff / cache to disk, add parameter
"sync_binlog_buff_interval" to conf file storage.conf
* use FastDHT v1.07 client code
Version 1.16 2009-02-14
* client can specify group name when upload file
* tracker_service.c: cmd dispatch changed to "switch ... case"
not "if ... else if"
* storage_service.c: call fdfs_quit before tracker_disconnect_server
Version 1.15 2009-01-28
* use FastDHT v1.04 client code
* use FastDHT client in a thread-safe manner
Version 1.14 2009-01-18
* storage/storage_sync.c:
old: if (reader.sync_row_count % 1000 == 0)
new: if (reader.scan_row_count % 2000 == 0)
* little adjustment for common files can be used by FastDHT
* sched_thread.h /.c add global variable g_schedule_flag to quit normally
* shared_func.h / .c add function get_time_item_from_conf
* sched_thread.h /.c support time_base of task
* hash.h / .c add function CRC32, add hash function to support stream hash
* add FastDHT client files in storage/fdht_client/
* create a symbolic link when the file content is duplicated,
add item "check_file_duplicate" to conf file storage.conf
* use FastDHT v1.02 client code
* auto delete invalid entry in FastDHT when the source file does not exist
Version 1.13 2008-11-29
* re-calculate a group's free space when one of its storage servers'
  free space increases
* add parameters: sync_interval, sync_start_time and sync_end_time to
storage.conf
* performance enhancement: log to buffer, flush to disk every interval seconds
* standard fds closed by daemon_init: 0(stdin), 1(stdout) and 2(stderr)
* fix bug: pthread_kill sometimes cause core dump when program terminated
* fix bug: sync.c open next binlog cause loop call
Version 1.12 2008-11-12
* storage server support multi path (mount point)
* upload file support file ext name, add source storage ip address to filename
* add delete command to delete the invalid storage server
* add client functions which combine group name and filename to file id,
  add another client test program: fdfs_test1.c to use file id
* client download file support callback function
* add protocol cmd TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE,
and client API add tracker_query_storage_update
* add protocol cmd TRACKER_PROTO_CMD_STORAGE_SYNC_REPORT to report last
synced timestamp as dest server
* fix sync old data files to new server bug
* fcntl change to pthread_mutex_lock
Version 1.11 2008-10-04
* kill report and sync threads when recv terminate signal
* add item "store_server" in tracker.conf, by default use the first
storage server to store uploaded files
* ini_file_reader.c changed: a conf file can include other conf files
* some adjustment:
some macro name changed
add common_define.h
remove fdfs_define.c
fdfs_os_bits.h change to _os_bits.h
Version 1.10 2008-09-20
* performance optimizing: use thread pool, create all work threads at startup
* trim function op in shared_func.c
* add Makefile template Makefile.in, delete Makefile and Makefile.freebsd
change make.sh to support all unix systems (passed in Linux and FreeBSD)
Version 1.9 2008-09-14
* security enhancement: support allow hosts which can connect to the server
* server can be run by the specified group and user, set by the config file
* change make.sh and add file common/fdfs_os_bits.h,
remove the warning info of printf format for int64_t param in 64 bits system
* storage_client.c changed: auto connect to storage server when not connected
* change some macro name and function name in tracker/tracker_proto.h
Version 1.8 2008-09-07
* communication protocol changed to support large files exceeding 2GB
  (a minimal encoding sketch follows this version's notes):
  # all integer fields are 8 bytes big-endian
  # group name has a fixed length: FDFS_GROUP_NAME_MAX_LEN bytes
* storage stat numbers (such as total_upload_count, success_upload_count)
use int64_t (8 bytes integer)
* ini_file_reader.c add function iniGetInt64Value
* sockopt.c add function tcpsetnonblockopt
* shared_func.c add function set_nonblock
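  (illustration only: a minimal C sketch of the 8-byte big-endian integer
  encoding mentioned in this version; the helper names are invented for the
  example, the source kit ships its own helpers in the common/ code)

      #include <stdint.h>

      /* pack a 64-bit integer as 8 bytes, most significant byte first */
      static void pack_int64_be(int64_t n, unsigned char *buff)
      {
          int i;
          for (i=0; i<8; i++)
          {
              buff[i] = (unsigned char)((n >> (56 - 8 * i)) & 0xFF);
          }
      }

      /* decode 8 big-endian bytes back into a 64-bit integer */
      static int64_t unpack_int64_be(const unsigned char *buff)
      {
          int64_t n;
          int i;

          n = 0;
          for (i=0; i<8; i++)
          {
              n = (n << 8) | buff[i];
          }
          return n;
      }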
Version 1.7 2008-08-31
* performance optimizing:
# change fopen to syscall open
# increase the efficiency of socket functions tcpsenddata and tcprecvdata
* change the return value of socket functions such as tcpsenddata,
tcprecvdata and connectserverbyip
old return value: result=1 for success, result != 1 fail
new return value: result=0 for success, result != 0 fail, return the error code
* log function enhancement:
# support log level
# parameter "log_level" added to server config file
# keep the log file opened to increase performance
* fix log format and parameter mismatched bug (check by printf)
* log CRIT message to log file when program exit unexpectedly
* Makefile add compile flag -D_FILE_OFFSET_BITS=64 to support large files
* change the type of file_size and file_offset to off_t
* change signal to sigaction
* fix client Makefile to compile library correctly
* restart.sh modified: use external command "expr" to replace shell command "let"
Version 1.6 2008-08-24
* add restart daemon shell script: restart.sh
* use setrlimit to increase max open files if necessary
* security enhancement: the format of data filename must be: HH/HH/filename,
eg. B9/F4/SLI2NAAMRPR9r8.d
* fix bug: errno is not correct where the downloaded file does not exist,
communication is broken when the download file is a directory
Version 1.5 2008-08-17
* add client function storage_download_file_to_file
* use pthread_attr_setstacksize to increase thread stack size to 1 MB
* use sendfile syscall to send file in Linux and FreeBSD
* fix bug: add O_TRUNC flag when open file to write
* remove warning info compiled by gcc 4.2
* fcntl set lock.l_len to 0
Version 1.4 2008-08-10
* storage server recv file method change
old method: recv the whole file content/buff before write to file
new method: write to file once recv a certain bytes file buff, eg. 128KB buff size
* storage client and storage server send file method change
old method: get the whole file content/buff, then send to storage server
new method: send file to storage server more times. get a certain bytes file buff, then send to storage server
* upload file package remove the one pad byte field
* remove storage status FDFS_STORAGE_STATUS_DEACTIVE and add FDFS_STORAGE_STATUS_DELETED
Version 1.3 2008-08-03
* fix bug: when meta data is empty, getting meta data returns an error
* support java client
# memset response header to 0
# add group_name to upload file response package
Version 1.2 2008-07-27
* add client function storage_set_metadata to support setting metadata(overwrite or merge)
Version 1.1 2008-07-20
* implement storage disk report
* load balancing of storing between storage groups (volumes) when store_lookup is set to 2
Version 1.0 2008-07-12
* first version

209
JCEC-fastdfs/INSTALL Executable file
View File

@ -0,0 +1,209 @@
Copyright 2009 Happy Fish / YuQing
FastDFS may be copied only under the terms of the GNU General
Public License V3, which may be found in the FastDFS source kit.
Please visit the FastDFS Home Page for more detail.
Chinese language: http://www.fastken.com/
# step 1. download libfastcommon source codes and install it,
# github address: https://github.com/happyfish100/libfastcommon.git
# gitee address: https://gitee.com/fastdfs100/libfastcommon.git
# command lines as:
git clone https://github.com/happyfish100/libfastcommon.git
cd libfastcommon; git checkout V1.0.43
./make.sh clean && ./make.sh && ./make.sh install
# step 2. download fastdfs source codes and install it,
# github address: https://github.com/happyfish100/fastdfs.git
# gitee address: https://gitee.com/fastdfs100/fastdfs.git
# command lines as:
git clone https://github.com/happyfish100/fastdfs.git
cd fastdfs; git checkout V6.06
./make.sh clean && ./make.sh && ./make.sh install
# step 3. setup the config files
# the setup script does NOT overwrite existing config files,
# so please feel free to execute this script (take it easy :)
./setup.sh /etc/fdfs
# step 4. edit or modify the config files of tracker, storage and client
such as:
vi /etc/fdfs/tracker.conf
vi /etc/fdfs/storage.conf
vi /etc/fdfs/client.conf
and so on ...
# step 5. run the server programs
# start the tracker server:
/usr/bin/fdfs_trackerd /etc/fdfs/tracker.conf restart
# start the storage server:
/usr/bin/fdfs_storaged /etc/fdfs/storage.conf restart
# (optional) in Linux, you can start fdfs_trackerd and fdfs_storaged as a service:
/sbin/service fdfs_trackerd restart
/sbin/service fdfs_storaged restart
# step 6. (optional) run monitor program
# such as:
/usr/bin/fdfs_monitor /etc/fdfs/client.conf
# step 7. (optional) run the test program
# such as:
/usr/bin/fdfs_test <client_conf_filename> <operation>
/usr/bin/fdfs_test1 <client_conf_filename> <operation>
# for example, upload a file for test:
/usr/bin/fdfs_test /etc/fdfs/client.conf upload /usr/include/stdlib.h
tracker server config file sample please see conf/tracker.conf
storage server config file sample please see conf/storage.conf
client config file sample please see conf/client.conf
Item detail
1. server common items
---------------------------------------------------
| item name | type | default | Must |
---------------------------------------------------
| base_path | string | | Y |
---------------------------------------------------
| disabled | boolean| false | N |
---------------------------------------------------
| bind_addr | string | | N |
---------------------------------------------------
| network_timeout | int | 30(s) | N |
---------------------------------------------------
| max_connections | int | 256 | N |
---------------------------------------------------
| log_level | string | info | N |
---------------------------------------------------
| run_by_group | string | | N |
---------------------------------------------------
| run_by_user | string | | N |
---------------------------------------------------
| allow_hosts | string | * | N |
---------------------------------------------------
| sync_log_buff_interval| int | 10(s) | N |
---------------------------------------------------
| thread_stack_size | string | 1M | N |
---------------------------------------------------
memo:
* base_path is the base path of sub dirs:
  data and logs. base_path must exist and its sub dirs will
  be created automatically if they do not exist.
$base_path/data: store data files
$base_path/logs: store log files
* log_level is the standard log level (the same as syslog), case insensitive
# emerg: for emergency
# alert
# crit: for critical
# error
# warn: for warning
# notice
# info
# debug
* allow_hosts can occur more than once; host can be a hostname or an ip address,
"*" means match all ip addresses, can use range like this: 10.0.1.[1-15,20]
or host[01-08,20-25].domain.com, for example:
allow_hosts=10.0.1.[1-15,20]
allow_hosts=host[01-08,20-25].domain.com
2. tracker server items
---------------------------------------------------
| item name | type | default | Must |
---------------------------------------------------
| port | int | 22000 | N |
---------------------------------------------------
| store_lookup | int | 0 | N |
---------------------------------------------------
| store_group | string | | N |
---------------------------------------------------
| store_server | int | 0 | N |
---------------------------------------------------
| store_path | int | 0 | N |
---------------------------------------------------
| download_server | int | 0 | N |
---------------------------------------------------
| reserved_storage_space| string | 1GB | N |
---------------------------------------------------
memo:
* the value of store_lookup is:
0: round robin (default)
1: specify group
2: load balance (supported since V1.1)
* store_group is the name of the group used to store files.
  when store_lookup is set to 1 (specify group),
  store_group must be set to a specified group name.
* reserved_storage_space is the reserved storage space for the system
  or other applications. if the free (available) space of any storage
  server in a group <= reserved_storage_space, no file can be uploaded
  to this group (since V1.1)
  the byte unit can be one of the following:
  # G or g for gigabyte (GB)
  # M or m for megabyte (MB)
  # K or k for kilobyte (KB)
  # no unit for byte (B)
  for example, reserved_storage_space=10G reserves 10 gigabytes
3. storage server items
-------------------------------------------------
| item name | type | default | Must |
-------------------------------------------------
| group_name | string | | Y |
-------------------------------------------------
| tracker_server | string | | Y |
-------------------------------------------------
| port | int | 23000 | N |
-------------------------------------------------
| heart_beat_interval | int | 30(s) | N |
-------------------------------------------------
| stat_report_interval| int | 300(s) | N |
-------------------------------------------------
| sync_wait_msec | int | 100(ms) | N |
-------------------------------------------------
| sync_interval | int | 0(ms) | N |
-------------------------------------------------
| sync_start_time | string | 00:00 | N |
-------------------------------------------------
| sync_end_time | string | 23:59 | N |
-------------------------------------------------
| store_path_count | int | 1 | N |
-------------------------------------------------
| store_path0 | string |base_path| N |
-------------------------------------------------
| store_path# | string | | N |
-------------------------------------------------
|subdir_count_per_path| int | 256 | N |
-------------------------------------------------
|check_file_duplicate | boolean| 0 | N |
-------------------------------------------------
| key_namespace | string | | N |
-------------------------------------------------
| keep_alive | boolean| 0 | N |
-------------------------------------------------
| sync_binlog_buff_interval| int | 60s | N |
-------------------------------------------------
memo:
* tracker_server can occur more than once, and the tracker_server format is
  "host:port", where host can be a hostname or an ip address.
* store_path#, where # is a digit starting from 0
* check_file_duplicate: when set to true, must work with FastDHT server,
more detail please see INSTALL of FastDHT. FastDHT download page:
http://code.google.com/p/fastdht/downloads/list
* key_namespace: FastDHT key namespace, can't be empty when
  check_file_duplicate is true. the key namespace should be as short as possible

46
JCEC-fastdfs/README.md Executable file
View File

@ -0,0 +1,46 @@
Copyright (C) 2008 Happy Fish / YuQing
FastDFS may be copied only under the terms of the GNU General
Public License V3, which may be found in the FastDFS source kit.
Please visit the FastDFS Home Page for more detail.
Chinese language: http://www.fastken.com/
FastDFS is an open source high performance distributed file system. Its major
functions include file storing, file syncing and file accessing (file uploading
and file downloading), and it resolves the problems of high-capacity storage and
load balancing. FastDFS should meet the requirements of websites whose services
are based on files, such as photo sharing sites and video sharing sites.
FastDFS has two roles: tracker and storage. The tracker takes charge of
scheduling and load balancing for file access. The storage stores files, and its
function is file management, including file storing, file syncing and providing
file access interfaces. It also manages the meta data, which are attributes of the
file represented as key-value pairs. For example: width=1024, where the key is
"width" and the value is "1024".
The tracker and storage roles each contain one or more servers. Servers can be
added to or removed from the tracker or storage cluster at any time without
affecting the online services. The servers in the tracker cluster are peers.
The storage servers are organized into file volumes (also called groups) to
obtain high capacity. The storage system contains one or more volumes, and the
files in different volumes are independent of each other. The capacity of the
whole storage system equals the sum of all volumes' capacities. A file volume
contains one or more storage servers holding the same files; the servers in a
file volume back each other up, and all of these servers are load balanced.
When a storage server is added to a volume, files already existing in this
volume are replicated to the new server automatically, and when the replication
is done, the system switches this server online to provide storage services.
When the whole storage capacity is insufficient, you can add one or more
volumes to expand the storage capacity. To do this, you need to add one or
more storage servers.
The identification of a file is composed of two parts: the volume name and
the file name.
For client test code that uses the client library, please refer to the directory: client/test.
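As a quick orientation, here is a minimal upload sketch (error handling trimmed)
based on the client API used by the test programs in this source kit;
storage_upload_by_filename is assumed to be the plain-file counterpart of the
appender upload calls found in client/fdfs_appender_test.c:

    #include <stdio.h>
    #include <errno.h>
    #include "fastcommon/logger.h"
    #include "fdfs_client.h"

    int main(int argc, char *argv[])
    {
        ConnectionInfo *pTrackerServer;
        ConnectionInfo *pStorageServer;
        ConnectionInfo storageServer;
        char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
        char remote_filename[256];
        int store_path_index;
        int result;

        if (argc < 3)
        {
            printf("Usage: %s <config_file> <local_filename>\n", argv[0]);
            return 1;
        }

        log_init();
        if ((result=fdfs_client_init(argv[1])) != 0)  /* load the client config file */
        {
            return result;
        }

        /* ask a tracker server which storage server and store path to use */
        pTrackerServer = tracker_get_connection();
        if (pTrackerServer == NULL)
        {
            fdfs_client_destroy();
            return errno != 0 ? errno : ECONNREFUSED;
        }
        *group_name = '\0';
        store_path_index = 0;
        if ((result=tracker_query_storage_store(pTrackerServer,
                &storageServer, group_name, &store_path_index)) != 0)
        {
            fdfs_client_destroy();
            return result;
        }

        /* connect to the chosen storage server and upload the local file */
        if ((pStorageServer=tracker_make_connection(&storageServer, &result)) == NULL)
        {
            fdfs_client_destroy();
            return result;
        }
        result = storage_upload_by_filename(pTrackerServer, pStorageServer,
                store_path_index, argv[2], fdfs_get_file_ext_name(argv[2]),
                NULL, 0, group_name, remote_filename);
        if (result == 0)
        {
            /* the volume (group) name plus the remote filename form the file id */
            printf("%s/%s\n", group_name, remote_filename);
        }

        tracker_close_connection_ex(pStorageServer, true);
        tracker_close_connection_ex(pTrackerServer, true);
        fdfs_client_destroy();
        return result;
    }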
For more FastDFS related articles, please subscribe to the WeChat/Weixin public account
(Chinese language): fastdfs

20
JCEC-fastdfs/README_zh.md Executable file
View File

@ -0,0 +1,20 @@
FastDFS is an open source distributed file system. Its main functions include file storage, file syncing and file access (file upload and file download), solving the problems of large-capacity file storage and high-performance access. FastDFS is especially suitable for online services built around files, such as images, videos and documents.
As a lightweight distributed file system, FastDFS version V6.01 contains about 63,000 lines of code. FastDFS is implemented in C and supports UNIX-like systems such as Linux, FreeBSD and MacOS. Like google FS, FastDFS is an application-level file system rather than a general-purpose one; it can only be accessed through its dedicated API. C and Java SDKs and a PHP extension SDK are currently provided.
FastDFS is tailor-made for Internet applications: it solves the problem of large-capacity file storage while pursuing high performance and high scalability. FastDFS can be seen as a file-based key-value storage system in which the key is the file ID and the value is the file content, so it is more accurate to call it a distributed file storage service.
The architecture of FastDFS is quite simple, as shown in the figure below:
![architect](images/architect.png)
The features of FastDFS are as follows:
1) grouped storage, simple and flexible;
2) peer-to-peer structure, no single point of failure;
3) file IDs are generated by FastDFS and serve as the file access credential; FastDFS does not need a traditional name server or meta server;
4) large, medium and small files are all supported well, and massive numbers of small files can be stored;
5) a single storage server supports multiple disks and single-disk data recovery;
6) an nginx extension module is provided for seamless integration with nginx;
7) multi-threaded upload and download of files are supported, as well as resumable transfers;
8) additional file attributes (meta data) can be stored on the storage server.
For more detailed descriptions of FastDFS functions and features, please refer to the other articles of the FastDFS WeChat public account (search for the account: fastdfs).

78
JCEC-fastdfs/client/Makefile.in Executable file
View File

@ -0,0 +1,78 @@
.SUFFIXES: .c .o .lo
COMPILE = $(CC) $(CFLAGS)
ENABLE_STATIC_LIB = $(ENABLE_STATIC_LIB)
ENABLE_SHARED_LIB = $(ENABLE_SHARED_LIB)
INC_PATH = -I../common -I../tracker -I/usr/include/fastcommon
LIB_PATH = $(LIBS) -lfastcommon
TARGET_PATH = $(TARGET_PREFIX)/bin
TARGET_LIB = $(TARGET_PREFIX)/$(LIB_VERSION)
TARGET_INC = $(TARGET_PREFIX)/include
CONFIG_PATH = $(TARGET_CONF_PATH)
FDFS_STATIC_OBJS = ../common/fdfs_global.o ../common/fdfs_http_shared.o \
../common/mime_file_parser.o ../tracker/tracker_proto.o \
../tracker/fdfs_shared_func.o ../tracker/fdfs_server_id_func.o \
../storage/trunk_mgr/trunk_shared.o \
tracker_client.o client_func.o \
client_global.o storage_client.o
STATIC_OBJS = $(FDFS_STATIC_OBJS)
FDFS_SHARED_OBJS = ../common/fdfs_global.lo ../common/fdfs_http_shared.lo \
../common/mime_file_parser.lo ../tracker/tracker_proto.lo \
../tracker/fdfs_shared_func.lo ../tracker/fdfs_server_id_func.lo \
../storage/trunk_mgr/trunk_shared.lo \
tracker_client.lo client_func.lo \
client_global.lo storage_client.lo
FDFS_HEADER_FILES = ../common/fdfs_define.h ../common/fdfs_global.h \
../common/mime_file_parser.h ../common/fdfs_http_shared.h \
../tracker/tracker_types.h ../tracker/tracker_proto.h \
../tracker/fdfs_shared_func.h ../tracker/fdfs_server_id_func.h \
../storage/trunk_mgr/trunk_shared.h \
tracker_client.h storage_client.h storage_client1.h \
client_func.h client_global.h fdfs_client.h
ALL_OBJS = $(STATIC_OBJS) $(FDFS_SHARED_OBJS)
ALL_PRGS = fdfs_monitor fdfs_test fdfs_test1 fdfs_crc32 fdfs_upload_file \
fdfs_download_file fdfs_delete_file fdfs_file_info \
fdfs_appender_test fdfs_appender_test1 fdfs_append_file \
fdfs_upload_appender fdfs_regenerate_filename
STATIC_LIBS = libfdfsclient.a
SHARED_LIBS = libfdfsclient.so
CLIENT_SHARED_LIBS = libfdfsclient.so
ALL_LIBS = $(STATIC_LIBS) $(SHARED_LIBS)
all: $(ALL_OBJS) $(ALL_PRGS) $(ALL_LIBS)
libfdfsclient.so:
$(COMPILE) -o $@ $< -shared $(FDFS_SHARED_OBJS) $(LIB_PATH)
libfdfsclient.a:
ar rcs $@ $< $(FDFS_STATIC_OBJS)
.o:
$(COMPILE) -o $@ $< $(STATIC_OBJS) $(LIB_PATH) $(INC_PATH)
.c:
$(COMPILE) -o $@ $< $(STATIC_OBJS) $(LIB_PATH) $(INC_PATH)
.c.o:
$(COMPILE) -c -o $@ $< $(INC_PATH)
.c.lo:
$(COMPILE) -c -fPIC -o $@ $< $(INC_PATH)
install:
mkdir -p $(TARGET_PATH)
mkdir -p $(CONFIG_PATH)
mkdir -p $(TARGET_LIB)
mkdir -p $(TARGET_PREFIX)/lib
cp -f $(ALL_PRGS) $(TARGET_PATH)
if [ $(ENABLE_STATIC_LIB) -eq 1 ]; then cp -f $(STATIC_LIBS) $(TARGET_LIB); cp -f $(STATIC_LIBS) $(TARGET_PREFIX)/lib/;fi
if [ $(ENABLE_SHARED_LIB) -eq 1 ]; then cp -f $(CLIENT_SHARED_LIBS) $(TARGET_LIB); cp -f $(CLIENT_SHARED_LIBS) $(TARGET_PREFIX)/lib/;fi
mkdir -p $(TARGET_INC)/fastdfs
cp -f $(FDFS_HEADER_FILES) $(TARGET_INC)/fastdfs
if [ ! -f $(CONFIG_PATH)/client.conf.sample ]; then cp -f ../conf/client.conf $(CONFIG_PATH)/client.conf.sample; fi
clean:
rm -f $(ALL_OBJS) $(ALL_PRGS) $(ALL_LIBS)

564
JCEC-fastdfs/client/client_func.c Executable file
View File

@ -0,0 +1,564 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//client_func.c
#include <sys/types.h>
#include <sys/stat.h>
#include <sys/socket.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <netdb.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <time.h>
#include "fdfs_define.h"
#include "fastcommon/logger.h"
#include "fdfs_global.h"
#include "fastcommon/base64.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/shared_func.h"
#include "fastcommon/ini_file_reader.h"
#include "fastcommon/connection_pool.h"
#include "tracker_types.h"
#include "tracker_proto.h"
#include "client_global.h"
#include "client_func.h"
static int storage_cmp_by_ip_and_port(const void *p1, const void *p2)
{
int res;
res = strcmp(((ConnectionInfo *)p1)->ip_addr,
((ConnectionInfo *)p2)->ip_addr);
if (res != 0)
{
return res;
}
return ((ConnectionInfo *)p1)->port -
((ConnectionInfo *)p2)->port;
}
static int storage_cmp_server_info(const void *p1, const void *p2)
{
TrackerServerInfo *server1;
TrackerServerInfo *server2;
ConnectionInfo *pc1;
ConnectionInfo *pc2;
ConnectionInfo *end1;
int res;
server1 = (TrackerServerInfo *)p1;
server2 = (TrackerServerInfo *)p2;
res = server1->count - server2->count;
if (res != 0)
{
return res;
}
if (server1->count == 1)
{
return storage_cmp_by_ip_and_port(server1->connections + 0,
server2->connections + 0);
}
end1 = server1->connections + server1->count;
for (pc1=server1->connections,pc2=server2->connections; pc1<end1; pc1++,pc2++)
{
if ((res=storage_cmp_by_ip_and_port(pc1, pc2)) != 0)
{
return res;
}
}
return 0;
}
static void insert_into_sorted_servers(TrackerServerGroup *pTrackerGroup, \
TrackerServerInfo *pInsertedServer)
{
TrackerServerInfo *pDestServer;
for (pDestServer=pTrackerGroup->servers+pTrackerGroup->server_count;
pDestServer>pTrackerGroup->servers; pDestServer--)
{
if (storage_cmp_server_info(pInsertedServer, pDestServer-1) > 0)
{
memcpy(pDestServer, pInsertedServer,
sizeof(TrackerServerInfo));
return;
}
memcpy(pDestServer, pDestServer-1, sizeof(TrackerServerInfo));
}
memcpy(pDestServer, pInsertedServer, sizeof(TrackerServerInfo));
}
static int copy_tracker_servers(TrackerServerGroup *pTrackerGroup,
const char *filename, char **ppTrackerServers)
{
char **ppSrc;
char **ppEnd;
TrackerServerInfo destServer;
int result;
memset(&destServer, 0, sizeof(TrackerServerInfo));
fdfs_server_sock_reset(&destServer);
ppEnd = ppTrackerServers + pTrackerGroup->server_count;
pTrackerGroup->server_count = 0;
for (ppSrc=ppTrackerServers; ppSrc<ppEnd; ppSrc++)
{
if ((result=fdfs_parse_server_info(*ppSrc,
FDFS_TRACKER_SERVER_DEF_PORT, &destServer)) != 0)
{
return result;
}
if (bsearch(&destServer, pTrackerGroup->servers,
pTrackerGroup->server_count,
sizeof(TrackerServerInfo),
storage_cmp_server_info) == NULL)
{
insert_into_sorted_servers(pTrackerGroup, &destServer);
pTrackerGroup->server_count++;
}
}
/*
{
TrackerServerInfo *pServer;
for (pServer=pTrackerGroup->servers; pServer<pTrackerGroup->servers+ \
pTrackerGroup->server_count; pServer++)
{
//printf("server=%s:%d\n", \
pServer->ip_addr, pServer->port);
}
}
*/
return 0;
}
static int fdfs_check_tracker_group(TrackerServerGroup *pTrackerGroup,
const char *conf_filename)
{
int result;
TrackerServerInfo *pServer;
TrackerServerInfo *pEnd;
char error_info[256];
pEnd = pTrackerGroup->servers + pTrackerGroup->server_count;
for (pServer=pTrackerGroup->servers; pServer<pEnd; pServer++)
{
if ((result=fdfs_check_server_ips(pServer,
error_info, sizeof(error_info))) != 0)
{
logError("file: "__FILE__", line: %d, "
"conf file: %s, tracker_server is invalid, "
"error info: %s", __LINE__, conf_filename, error_info);
return result;
}
}
return 0;
}
int fdfs_load_tracker_group_ex(TrackerServerGroup *pTrackerGroup,
const char *conf_filename, IniContext *pIniContext)
{
int result;
int bytes;
char *ppTrackerServers[FDFS_MAX_TRACKERS];
if ((pTrackerGroup->server_count=iniGetValues(NULL, "tracker_server",
pIniContext, ppTrackerServers, FDFS_MAX_TRACKERS)) <= 0)
{
logError("file: "__FILE__", line: %d, "
"conf file \"%s\", item \"tracker_server\" not exist",
__LINE__, conf_filename);
return ENOENT;
}
bytes = sizeof(TrackerServerInfo) * pTrackerGroup->server_count;
pTrackerGroup->servers = (TrackerServerInfo *)malloc(bytes);
if (pTrackerGroup->servers == NULL)
{
logError("file: "__FILE__", line: %d, "
"malloc %d bytes fail", __LINE__, bytes);
pTrackerGroup->server_count = 0;
return errno != 0 ? errno : ENOMEM;
}
memset(pTrackerGroup->servers, 0, bytes);
if ((result=copy_tracker_servers(pTrackerGroup, conf_filename,
ppTrackerServers)) != 0)
{
pTrackerGroup->server_count = 0;
free(pTrackerGroup->servers);
pTrackerGroup->servers = NULL;
return result;
}
return fdfs_check_tracker_group(pTrackerGroup, conf_filename);
}
int fdfs_load_tracker_group(TrackerServerGroup *pTrackerGroup,
const char *conf_filename)
{
IniContext iniContext;
int result;
if ((result=iniLoadFromFile(conf_filename, &iniContext)) != 0)
{
logError("file: "__FILE__", line: %d, "
"load conf file \"%s\" fail, ret code: %d",
__LINE__, conf_filename, result);
return result;
}
result = fdfs_load_tracker_group_ex(pTrackerGroup,
conf_filename, &iniContext);
iniFreeContext(&iniContext);
return result;
}
static int fdfs_get_params_from_tracker(bool *use_storage_id)
{
IniContext iniContext;
int result;
bool continue_flag;
continue_flag = false;
if ((result=fdfs_get_ini_context_from_tracker(&g_tracker_group,
&iniContext, &continue_flag, false, NULL)) != 0)
{
return result;
}
*use_storage_id = iniGetBoolValue(NULL, "use_storage_id",
&iniContext, false);
iniFreeContext(&iniContext);
if (*use_storage_id)
{
result = fdfs_get_storage_ids_from_tracker_group(
&g_tracker_group);
}
return result;
}
static int fdfs_client_do_init_ex(TrackerServerGroup *pTrackerGroup, \
const char *conf_filename, IniContext *iniContext)
{
char *pBasePath;
int result;
bool use_storage_id;
bool load_fdfs_parameters_from_tracker;
pBasePath = iniGetStrValue(NULL, "base_path", iniContext);
if (pBasePath == NULL)
{
strcpy(g_fdfs_base_path, "/tmp");
}
else
{
snprintf(g_fdfs_base_path, sizeof(g_fdfs_base_path),
"%s", pBasePath);
chopPath(g_fdfs_base_path);
if (!fileExists(g_fdfs_base_path))
{
logError("file: "__FILE__", line: %d, " \
"\"%s\" can't be accessed, error info: %s", \
__LINE__, g_fdfs_base_path, STRERROR(errno));
return errno != 0 ? errno : ENOENT;
}
if (!isDir(g_fdfs_base_path))
{
logError("file: "__FILE__", line: %d, " \
"\"%s\" is not a directory!", \
__LINE__, g_fdfs_base_path);
return ENOTDIR;
}
}
g_fdfs_connect_timeout = iniGetIntValue(NULL, "connect_timeout", \
iniContext, DEFAULT_CONNECT_TIMEOUT);
if (g_fdfs_connect_timeout <= 0)
{
g_fdfs_connect_timeout = DEFAULT_CONNECT_TIMEOUT;
}
g_fdfs_network_timeout = iniGetIntValue(NULL, "network_timeout", \
iniContext, DEFAULT_NETWORK_TIMEOUT);
if (g_fdfs_network_timeout <= 0)
{
g_fdfs_network_timeout = DEFAULT_NETWORK_TIMEOUT;
}
if ((result=fdfs_load_tracker_group_ex(pTrackerGroup, \
conf_filename, iniContext)) != 0)
{
return result;
}
g_anti_steal_token = iniGetBoolValue(NULL, \
"http.anti_steal.check_token", \
iniContext, false);
if (g_anti_steal_token)
{
char *anti_steal_secret_key;
anti_steal_secret_key = iniGetStrValue(NULL, \
"http.anti_steal.secret_key", \
iniContext);
if (anti_steal_secret_key == NULL || \
*anti_steal_secret_key == '\0')
{
logError("file: "__FILE__", line: %d, " \
"param \"http.anti_steal.secret_key\""\
" not exist or is empty", __LINE__);
return EINVAL;
}
buffer_strcpy(&g_anti_steal_secret_key, anti_steal_secret_key);
}
g_tracker_server_http_port = iniGetIntValue(NULL, \
"http.tracker_server_port", \
iniContext, 80);
if (g_tracker_server_http_port <= 0)
{
g_tracker_server_http_port = 80;
}
if ((result=fdfs_connection_pool_init(conf_filename, iniContext)) != 0)
{
return result;
}
load_fdfs_parameters_from_tracker = iniGetBoolValue(NULL, \
"load_fdfs_parameters_from_tracker", \
iniContext, false);
if (load_fdfs_parameters_from_tracker)
{
fdfs_get_params_from_tracker(&use_storage_id);
}
else
{
use_storage_id = iniGetBoolValue(NULL, "use_storage_id", \
iniContext, false);
if (use_storage_id)
{
result = fdfs_load_storage_ids_from_file( \
conf_filename, iniContext);
}
}
#ifdef DEBUG_FLAG
logDebug("base_path=%s, " \
"connect_timeout=%d, "\
"network_timeout=%d, "\
"tracker_server_count=%d, " \
"anti_steal_token=%d, " \
"anti_steal_secret_key length=%d, " \
"use_connection_pool=%d, " \
"g_connection_pool_max_idle_time=%ds, " \
"use_storage_id=%d, storage server id count: %d\n", \
g_fdfs_base_path, g_fdfs_connect_timeout, \
g_fdfs_network_timeout, pTrackerGroup->server_count, \
g_anti_steal_token, g_anti_steal_secret_key.length, \
g_use_connection_pool, g_connection_pool_max_idle_time, \
use_storage_id, g_storage_ids_by_id.count);
#endif
return 0;
}
int fdfs_client_init_from_buffer_ex(TrackerServerGroup *pTrackerGroup, \
const char *buffer)
{
IniContext iniContext;
char *new_buff;
int result;
new_buff = strdup(buffer);
if (new_buff == NULL)
{
logError("file: "__FILE__", line: %d, " \
"strdup %d bytes fail", __LINE__, (int)strlen(buffer));
return ENOMEM;
}
result = iniLoadFromBuffer(new_buff, &iniContext);
free(new_buff);
if (result != 0)
{
logError("file: "__FILE__", line: %d, " \
"load parameters from buffer fail, ret code: %d", \
__LINE__, result);
return result;
}
result = fdfs_client_do_init_ex(pTrackerGroup, "buffer", &iniContext);
iniFreeContext(&iniContext);
return result;
}
int fdfs_client_init_ex(TrackerServerGroup *pTrackerGroup, \
const char *conf_filename)
{
IniContext iniContext;
int result;
if ((result=iniLoadFromFile(conf_filename, &iniContext)) != 0)
{
logError("file: "__FILE__", line: %d, " \
"load conf file \"%s\" fail, ret code: %d", \
__LINE__, conf_filename, result);
return result;
}
result = fdfs_client_do_init_ex(pTrackerGroup, conf_filename, \
&iniContext);
iniFreeContext(&iniContext);
return result;
}
int fdfs_copy_tracker_group(TrackerServerGroup *pDestTrackerGroup, \
TrackerServerGroup *pSrcTrackerGroup)
{
int bytes;
TrackerServerInfo *pDestServer;
TrackerServerInfo *pDestServerEnd;
bytes = sizeof(TrackerServerInfo) * pSrcTrackerGroup->server_count;
pDestTrackerGroup->servers = (TrackerServerInfo *)malloc(bytes);
if (pDestTrackerGroup->servers == NULL)
{
logError("file: "__FILE__", line: %d, "
"malloc %d bytes fail", __LINE__, bytes);
return errno != 0 ? errno : ENOMEM;
}
pDestTrackerGroup->server_index = 0;
pDestTrackerGroup->leader_index = 0;
pDestTrackerGroup->server_count = pSrcTrackerGroup->server_count;
memcpy(pDestTrackerGroup->servers, pSrcTrackerGroup->servers, bytes);
pDestServerEnd = pDestTrackerGroup->servers +
pDestTrackerGroup->server_count;
for (pDestServer=pDestTrackerGroup->servers;
pDestServer<pDestServerEnd; pDestServer++)
{
fdfs_server_sock_reset(pDestServer);
}
return 0;
}
bool fdfs_tracker_group_equals(TrackerServerGroup *pGroup1,
TrackerServerGroup *pGroup2)
{
TrackerServerInfo *pServer1;
TrackerServerInfo *pServer2;
TrackerServerInfo *pEnd1;
if (pGroup1->server_count != pGroup2->server_count)
{
return false;
}
pEnd1 = pGroup1->servers + pGroup1->server_count;
pServer1 = pGroup1->servers;
pServer2 = pGroup2->servers;
while (pServer1 < pEnd1)
{
if (!fdfs_server_equal(pServer1, pServer2))
{
return false;
}
pServer1++;
pServer2++;
}
return true;
}
void fdfs_client_destroy_ex(TrackerServerGroup *pTrackerGroup)
{
if (pTrackerGroup->servers != NULL)
{
free(pTrackerGroup->servers);
pTrackerGroup->servers = NULL;
pTrackerGroup->server_count = 0;
pTrackerGroup->server_index = 0;
}
}
const char *fdfs_get_file_ext_name_ex(const char *filename,
const bool twoExtName)
{
const char *fileExtName;
const char *p;
const char *pStart;
int extNameLen;
fileExtName = strrchr(filename, '.');
if (fileExtName == NULL)
{
return NULL;
}
extNameLen = strlen(fileExtName + 1);
if (extNameLen > FDFS_FILE_EXT_NAME_MAX_LEN)
{
return NULL;
}
if (strchr(fileExtName + 1, '/') != NULL) //invalid extension name
{
return NULL;
}
if (!twoExtName)
{
return fileExtName + 1;
}
pStart = fileExtName - (FDFS_FILE_EXT_NAME_MAX_LEN - extNameLen) - 1;
if (pStart < filename)
{
pStart = filename;
}
p = fileExtName - 1; //before .
while ((p > pStart) && (*p != '.'))
{
p--;
}
if (p > pStart) //found (extension name have a dot)
{
if (strchr(p + 1, '/') == NULL) //valid extension name
{
return p + 1; //skip .
}
}
return fileExtName + 1; //skip .
}

147
JCEC-fastdfs/client/client_func.h Executable file
View File

@ -0,0 +1,147 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//client_func.h
#include "tracker_types.h"
#include "client_global.h"
#include "fastcommon/ini_file_reader.h"
#ifndef _CLIENT_FUNC_H_
#define _CLIENT_FUNC_H_
typedef struct {
short file_type;
bool get_from_server;
time_t create_timestamp;
int crc32;
int source_id; //source storage id
int64_t file_size;
char source_ip_addr[IP_ADDRESS_SIZE]; //source storage ip address
} FDFSFileInfo;
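/* note: an FDFSFileInfo is typically filled by fdfs_get_file_info(); the client
   test programs in this source kit print its file_size, crc32, source_ip_addr
   and create_timestamp fields after uploading */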
#ifdef __cplusplus
extern "C" {
#endif
#define fdfs_client_init(filename) \
fdfs_client_init_ex((&g_tracker_group), filename)
#define fdfs_client_init_from_buffer(buffer) \
fdfs_client_init_from_buffer_ex((&g_tracker_group), buffer)
#define fdfs_client_destroy() \
fdfs_client_destroy_ex((&g_tracker_group))
/**
* client initial from config file
* params:
* pTrackerGroup: tracker group
* conf_filename: client config filename
* return: 0 success, !=0 fail, return the error code
**/
int fdfs_client_init_ex(TrackerServerGroup *pTrackerGroup, \
const char *conf_filename);
/**
* client initial from buffer
* params:
* pTrackerGroup: tracker group
* conf_filename: client config filename
* return: 0 success, !=0 fail, return the error code
**/
int fdfs_client_init_from_buffer_ex(TrackerServerGroup *pTrackerGroup, \
const char *buffer);
/**
* load tracker server group
* params:
* pTrackerGroup: tracker group
* conf_filename: tracker server group config filename
* return: 0 success, !=0 fail, return the error code
**/
int fdfs_load_tracker_group(TrackerServerGroup *pTrackerGroup, \
const char *conf_filename);
/**
* load tracker server group
* params:
* pTrackerGroup: tracker group
* conf_filename: config filename
* items: ini file items
* nItemCount: ini file item count
* return: 0 success, !=0 fail, return the error code
**/
int fdfs_load_tracker_group_ex(TrackerServerGroup *pTrackerGroup, \
const char *conf_filename, IniContext *pIniContext);
/**
* copy tracker server group
* params:
* pDestTrackerGroup: the dest tracker group
* pSrcTrackerGroup: the source tracker group
* return: 0 success, !=0 fail, return the error code
**/
int fdfs_copy_tracker_group(TrackerServerGroup *pDestTrackerGroup, \
TrackerServerGroup *pSrcTrackerGroup);
/**
* client destroy function
* params:
* pTrackerGroup: tracker group
* return: none
**/
void fdfs_client_destroy_ex(TrackerServerGroup *pTrackerGroup);
/**
* tracker group equals
* params:
* pGroup1: tracker group 1
* pGroup2: tracker group 2
* return: true for equals, otherwise false
**/
bool fdfs_tracker_group_equals(TrackerServerGroup *pGroup1, \
TrackerServerGroup *pGroup2);
/**
* get file ext name from filename, extension name do not include dot
* params:
* filename: the filename
* return: file ext name, NULL for no ext name
**/
#define fdfs_get_file_ext_name1(filename) \
fdfs_get_file_ext_name_ex(filename, false)
/**
* get file ext name from filename, extension name maybe include dot
* params:
* filename: the filename
* return: file ext name, NULL for no ext name
**/
#define fdfs_get_file_ext_name2(filename) \
fdfs_get_file_ext_name_ex(filename, true)
#define fdfs_get_file_ext_name(filename) \
fdfs_get_file_ext_name_ex(filename, true)
/**
* get file ext name from filename
* params:
* filename: the filename
* twoExtName: two extension name as the extension name
* return: file ext name, NULL for no ext name
**/
const char *fdfs_get_file_ext_name_ex(const char *filename,
const bool twoExtName);
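/* illustrative example: for a filename such as "photo.tar.gz",
   fdfs_get_file_ext_name1 returns "gz" (single extension part), while
   fdfs_get_file_ext_name2 / fdfs_get_file_ext_name return "tar.gz"
   (two extension parts), provided the result does not exceed
   FDFS_FILE_EXT_NAME_MAX_LEN characters */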
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,23 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdlib.h>
#include <string.h>
#include "client_global.h"
int g_tracker_server_http_port = 80;
TrackerServerGroup g_tracker_group = {0, 0, -1, NULL};
/*
int server_count;
int server_index; //server index for roundrobin
int leader_index; //leader server index
TrackerServerInfo *servers;
*/
bool g_anti_steal_token = false;
BufferInfo g_anti_steal_secret_key = {0};

View File

@ -0,0 +1,35 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//client_global.h
#ifndef _CLIENT_GLOBAL_H
#define _CLIENT_GLOBAL_H
#include "fastcommon/common_define.h"
#include "tracker_types.h"
#include "fdfs_shared_func.h"
#ifdef __cplusplus
extern "C" {
#endif
extern int g_tracker_server_http_port;
extern TrackerServerGroup g_tracker_group;
extern bool g_anti_steal_token;
extern BufferInfo g_anti_steal_secret_key;
#define fdfs_get_tracker_leader_index(leaderIp, leaderPort) \
fdfs_get_tracker_leader_index_ex(&g_tracker_group, \
leaderIp, leaderPort)
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,66 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fastcommon/logger.h"
int main(int argc, char *argv[])
{
char *conf_filename;
char *local_filename;
ConnectionInfo *pTrackerServer;
int result;
char appender_file_id[128];
if (argc < 4)
{
printf("Usage: %s <config_file> <appender_file_id> " \
"<local_filename>\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_ERR;
conf_filename = argv[1];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
snprintf(appender_file_id, sizeof(appender_file_id), "%s", argv[2]);
local_filename = argv[3];
if ((result=storage_append_by_filename1(pTrackerServer, \
NULL, local_filename, appender_file_id)) != 0)
{
printf("append file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
return result;
}
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}

View File

@ -0,0 +1,439 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fdfs_global.h"
#include "fastcommon/base64.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/logger.h"
#include "fdfs_http_shared.h"
int writeToFileCallback(void *arg, const int64_t file_size, const char *data, \
const int current_size)
{
if (arg == NULL)
{
return EINVAL;
}
if (fwrite(data, current_size, 1, (FILE *)arg) != 1)
{
return errno != 0 ? errno : EIO;
}
return 0;
}
int uploadFileCallback(void *arg, const int64_t file_size, int sock)
{
int64_t total_send_bytes;
char *filename;
if (arg == NULL)
{
return EINVAL;
}
filename = (char *)arg;
return tcpsendfile(sock, filename, file_size, \
g_fdfs_network_timeout, &total_send_bytes);
}
int main(int argc, char *argv[])
{
char *conf_filename;
char *local_filename;
ConnectionInfo *pTrackerServer;
ConnectionInfo *pStorageServer;
int result;
ConnectionInfo storageServer;
char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
char remote_filename[256];
char appender_filename[256];
FDFSMetaData meta_list[32];
int meta_count;
char token[32 + 1];
char file_id[128];
char file_url[256];
char szDatetime[20];
char szPortPart[16];
int url_len;
time_t ts;
int64_t file_offset;
int64_t file_size = 0;
int store_path_index;
FDFSFileInfo file_info;
int upload_type;
const char *file_ext_name;
struct stat stat_buf;
printf("This is FastDFS client test program v%d.%02d\n" \
"\nCopyright (C) 2008, Happy Fish / YuQing\n" \
"\nFastDFS may be copied only under the terms of the GNU General\n" \
"Public License V3, which may be found in the FastDFS source kit.\n" \
"Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
"for more detail.\n\n" \
, g_fdfs_version.major, g_fdfs_version.minor);
if (argc < 3)
{
printf("Usage: %s <config_file> <local_filename> " \
"[FILE | BUFF | CALLBACK]\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_DEBUG;
conf_filename = argv[1];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
local_filename = argv[2];
if (argc == 3)
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
else
{
if (strcmp(argv[3], "BUFF") == 0)
{
upload_type = FDFS_UPLOAD_BY_BUFF;
}
else if (strcmp(argv[3], "CALLBACK") == 0)
{
upload_type = FDFS_UPLOAD_BY_CALLBACK;
}
else
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
}
*group_name = '\0';
store_path_index = 0;
if ((result=tracker_query_storage_store(pTrackerServer, \
&storageServer, group_name, &store_path_index)) != 0)
{
fdfs_client_destroy();
printf("tracker_query_storage fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
return result;
}
printf("group_name=%s, ip_addr=%s, port=%d\n", \
group_name, storageServer.ip_addr, \
storageServer.port);
if ((pStorageServer=tracker_make_connection(&storageServer, \
&result)) == NULL)
{
fdfs_client_destroy();
return result;
}
memset(&meta_list, 0, sizeof(meta_list));
meta_count = 0;
strcpy(meta_list[meta_count].name, "ext_name");
strcpy(meta_list[meta_count].value, "jpg");
meta_count++;
strcpy(meta_list[meta_count].name, "width");
strcpy(meta_list[meta_count].value, "160");
meta_count++;
strcpy(meta_list[meta_count].name, "height");
strcpy(meta_list[meta_count].value, "80");
meta_count++;
strcpy(meta_list[meta_count].name, "file_size");
strcpy(meta_list[meta_count].value, "115120");
meta_count++;
file_ext_name = fdfs_get_file_ext_name(local_filename);
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_appender_by_filename ( \
pTrackerServer, pStorageServer, \
store_path_index, local_filename, \
file_ext_name, meta_list, meta_count, \
group_name, remote_filename);
}
else
{
result = errno != 0 ? errno : ENOENT;
}
printf("storage_upload_appender_by_filename\n");
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_upload_appender_by_filebuff( \
pTrackerServer, pStorageServer, \
store_path_index, file_content, \
file_size, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
free(file_content);
}
printf("storage_upload_appender_by_filebuff\n");
}
else
{
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_appender_by_callback( \
pTrackerServer, pStorageServer, \
store_path_index, uploadFileCallback, \
local_filename, file_size, \
file_ext_name, meta_list, meta_count, \
group_name, remote_filename);
}
else
{
result = errno != 0 ? errno : ENOENT;
}
printf("storage_upload_appender_by_callback\n");
}
if (result != 0)
{
printf("upload file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
if (g_tracker_server_http_port == 80)
{
*szPortPart = '\0';
}
else
{
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
}
sprintf(file_id, "%s/%s", group_name, remote_filename);
url_len = sprintf(file_url, "http://%s%s/%s", \
pTrackerServer->ip_addr, szPortPart, file_id);
if (g_anti_steal_token)
{
ts = time(NULL);
fdfs_http_gen_token(&g_anti_steal_secret_key, file_id, \
ts, token);
sprintf(file_url + url_len, "?token=%s&ts=%d", token, (int)ts);
}
printf("group_name=%s, remote_filename=%s\n", \
group_name, remote_filename);
fdfs_get_file_info(group_name, remote_filename, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("file url: %s\n", file_url);
//sleep(90);
strcpy(appender_filename, remote_filename);
if ((result=storage_truncate_file(pTrackerServer, pStorageServer, \
group_name, appender_filename, file_size / 2)) != 0)
{
printf("truncate file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
fdfs_get_file_info(group_name, appender_filename, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("file url: %s\n", file_url);
if (file_info.file_size != file_size / 2)
{
fprintf(stderr, "file size: %"PRId64 \
" != %"PRId64"!!!", file_info.file_size, file_size / 2);
}
//sleep(100);
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
result = storage_append_by_filename(pTrackerServer, \
pStorageServer, local_filename,
group_name, appender_filename);
printf("storage_append_by_filename\n");
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_append_by_filebuff(pTrackerServer, \
pStorageServer, file_content, \
file_size, group_name, appender_filename);
free(file_content);
}
printf("storage_append_by_filebuff\n");
}
else
{
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_append_by_callback(pTrackerServer, \
pStorageServer, uploadFileCallback, \
local_filename, file_size, \
group_name, appender_filename);
}
else
{
result = errno != 0 ? errno : ENOENT;
}
printf("storage_append_by_callback\n");
}
if (result != 0)
{
printf("append file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
printf("append file successfully.\n");
fdfs_get_file_info(group_name, remote_filename, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
if (file_info.file_size != file_size + file_size / 2)
{
fprintf(stderr, "file size: %"PRId64 \
" != %"PRId64"!!!", file_info.file_size, \
file_size + file_size / 2);
}
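/* modify (overwrite) the appender file at the current end-of-file offset with the local file content */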
file_offset = file_info.file_size;
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
result = storage_modify_by_filename(pTrackerServer, \
pStorageServer, local_filename, \
file_offset, group_name, \
appender_filename);
printf("storage_modify_by_filename\n");
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_modify_by_filebuff(pTrackerServer, \
pStorageServer, file_content, \
file_offset, file_size, group_name, \
appender_filename);
free(file_content);
}
printf("storage_modify_by_filebuff\n");
}
else
{
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_modify_by_callback(pTrackerServer, \
pStorageServer, uploadFileCallback, \
local_filename, file_offset, \
file_size, group_name, appender_filename);
}
else
{
result = errno != 0 ? errno : ENOENT;
}
printf("storage_modify_by_callback\n");
}
if (result != 0)
{
printf("modify file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
printf("modify file successfully.\n");
fdfs_get_file_info(group_name, remote_filename, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
if (file_info.file_size != 2 * file_size + file_size / 2)
{
fprintf(stderr, "file size: %"PRId64 \
" != %"PRId64"!!!", file_info.file_size, \
2 * file_size + file_size /2);
}
tracker_close_connection_ex(pStorageServer, true);
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}

View File

@ -0,0 +1,435 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fdfs_global.h"
#include "fastcommon/base64.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/logger.h"
#include "fdfs_http_shared.h"
int writeToFileCallback(void *arg, const int64_t file_size, const char *data, \
const int current_size)
{
if (arg == NULL)
{
return EINVAL;
}
if (fwrite(data, current_size, 1, (FILE *)arg) != 1)
{
return errno != 0 ? errno : EIO;
}
return 0;
}
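/* upload callback: stream the local file content to the storage server socket */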
int uploadFileCallback(void *arg, const int64_t file_size, int sock)
{
int64_t total_send_bytes;
char *filename;
if (arg == NULL)
{
return EINVAL;
}
filename = (char *)arg;
return tcpsendfile(sock, filename, file_size, \
g_fdfs_network_timeout, &total_send_bytes);
}
int main(int argc, char *argv[])
{
char *conf_filename;
char *local_filename;
ConnectionInfo *pTrackerServer;
ConnectionInfo *pStorageServer;
int result;
ConnectionInfo storageServer;
char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
char file_id[256];
char appender_file_id[256];
FDFSMetaData meta_list[32];
int meta_count;
char token[32 + 1];
char file_url[256];
char szDatetime[20];
char szPortPart[16];
int url_len;
time_t ts;
int64_t file_offset;
int64_t file_size = 0;
int store_path_index;
FDFSFileInfo file_info;
int upload_type;
const char *file_ext_name;
struct stat stat_buf;
printf("This is FastDFS client test program v%d.%02d\n" \
"\nCopyright (C) 2008, Happy Fish / YuQing\n" \
"\nFastDFS may be copied only under the terms of the GNU General\n" \
"Public License V3, which may be found in the FastDFS source kit.\n" \
"Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
"for more detail.\n\n" \
, g_fdfs_version.major, g_fdfs_version.minor);
if (argc < 3)
{
printf("Usage: %s <config_file> <local_filename> " \
"[FILE | BUFF | CALLBACK]\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_DEBUG;
conf_filename = argv[1];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
local_filename = argv[2];
if (argc == 3)
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
else
{
if (strcmp(argv[3], "BUFF") == 0)
{
upload_type = FDFS_UPLOAD_BY_BUFF;
}
else if (strcmp(argv[3], "CALLBACK") == 0)
{
upload_type = FDFS_UPLOAD_BY_CALLBACK;
}
else
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
}
store_path_index = 0;
*group_name = '\0';
if ((result=tracker_query_storage_store(pTrackerServer, \
&storageServer, group_name, &store_path_index)) != 0)
{
fdfs_client_destroy();
printf("tracker_query_storage fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
return result;
}
printf("group_name=%s, ip_addr=%s, port=%d\n", \
group_name, storageServer.ip_addr, \
storageServer.port);
if ((pStorageServer=tracker_make_connection(&storageServer, \
&result)) == NULL)
{
fdfs_client_destroy();
return result;
}
memset(&meta_list, 0, sizeof(meta_list));
meta_count = 0;
strcpy(meta_list[meta_count].name, "ext_name");
strcpy(meta_list[meta_count].value, "jpg");
meta_count++;
strcpy(meta_list[meta_count].name, "width");
strcpy(meta_list[meta_count].value, "160");
meta_count++;
strcpy(meta_list[meta_count].name, "height");
strcpy(meta_list[meta_count].value, "80");
meta_count++;
strcpy(meta_list[meta_count].name, "file_size");
strcpy(meta_list[meta_count].value, "115120");
meta_count++;
file_ext_name = fdfs_get_file_ext_name(local_filename);
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_appender_by_filename1( \
pTrackerServer, pStorageServer, \
store_path_index, local_filename, \
file_ext_name, meta_list, meta_count, \
group_name, file_id);
}
else
{
result = errno != 0 ? errno : ENOENT;
}
printf("storage_upload_appender_by_filename1\n");
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_upload_appender_by_filebuff1( \
pTrackerServer, pStorageServer, \
store_path_index, file_content, \
file_size, file_ext_name, \
meta_list, meta_count, \
group_name, file_id);
free(file_content);
}
printf("storage_upload_appender_by_filebuff1\n");
}
else
{
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_appender_by_callback1( \
pTrackerServer, pStorageServer, \
store_path_index, uploadFileCallback, \
local_filename, file_size, \
file_ext_name, meta_list, meta_count, \
group_name, file_id);
}
else
{
result = errno != 0 ? errno : ENOENT;
}
printf("storage_upload_appender_by_callback1\n");
}
if (result != 0)
{
printf("upload file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
if (g_tracker_server_http_port == 80)
{
*szPortPart = '\0';
}
else
{
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
}
url_len = sprintf(file_url, "http://%s%s/%s", \
pTrackerServer->ip_addr, szPortPart, file_id);
if (g_anti_steal_token)
{
ts = time(NULL);
fdfs_http_gen_token(&g_anti_steal_secret_key, file_id, \
ts, token);
sprintf(file_url + url_len, "?token=%s&ts=%d", token, (int)ts);
}
printf("fild_id=%s\n", file_id);
fdfs_get_file_info1(file_id, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("file url: %s\n", file_url);
strcpy(appender_file_id, file_id);
if ((result=storage_truncate_file1(pTrackerServer, pStorageServer, \
appender_file_id, 0)) != 0)
{
printf("truncate file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
fdfs_get_file_info1(file_id, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("file url: %s\n", file_url);
if (file_info.file_size != 0)
{
fprintf(stderr, "file size: %"PRId64 \
" != 0!!!", file_info.file_size);
}
//sleep(70);
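/* append the local file to the (now empty) appender file */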
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
result = storage_append_by_filename1(pTrackerServer, \
pStorageServer, local_filename,
appender_file_id);
printf("storage_append_by_filename\n");
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_append_by_filebuff1(pTrackerServer, \
pStorageServer, file_content, \
file_size, appender_file_id);
free(file_content);
}
printf("storage_append_by_filebuff1\n");
}
else
{
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_append_by_callback1(pTrackerServer, \
pStorageServer, uploadFileCallback, \
local_filename, file_size, \
appender_file_id);
}
else
{
result = errno != 0 ? errno : ENOENT;
}
printf("storage_append_by_callback1\n");
}
if (result != 0)
{
printf("append file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
printf("append file successfully.\n");
fdfs_get_file_info1(appender_file_id, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
if (file_info.file_size != file_size)
{
fprintf(stderr, "file size: %"PRId64 \
" != %"PRId64"!!!", file_info.file_size, \
file_size);
}
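/* modify the appender file starting at the end offset, which should double its size */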
file_offset = file_size;
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
result = storage_modify_by_filename1(pTrackerServer, \
pStorageServer, local_filename,
file_offset, appender_file_id);
printf("storage_modify_by_filename\n");
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_modify_by_filebuff1( \
pTrackerServer, pStorageServer, \
file_content, file_offset, file_size, \
appender_file_id);
free(file_content);
}
printf("storage_modify_by_filebuff1\n");
}
else
{
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_modify_by_callback1( \
pTrackerServer, pStorageServer, \
uploadFileCallback, \
local_filename, file_offset, \
file_size, appender_file_id);
}
else
{
result = errno != 0 ? errno : ENOENT;
}
printf("storage_modify_by_callback1\n");
}
if (result != 0)
{
printf("modify file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
printf("modify file successfully.\n");
fdfs_get_file_info1(appender_file_id, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
if (file_info.file_size != 2 * file_size)
{
fprintf(stderr, "file size: %"PRId64 \
" != %"PRId64"!!!", file_info.file_size, \
2 * file_size);
}
tracker_close_connection_ex(pStorageServer, true);
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}

View File

@ -0,0 +1,22 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#ifndef FDFS_CLIENT_H
#define FDFS_CLIENT_H
#include "fastcommon/shared_func.h"
#include "tracker_types.h"
#include "tracker_proto.h"
#include "tracker_client.h"
#include "storage_client.h"
#include "storage_client1.h"
#include "client_func.h"
#include "client_global.h"
#endif

105
JCEC-fastdfs/client/fdfs_crc32.c Executable file
View File

@ -0,0 +1,105 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <fcntl.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fastcommon/hash.h"
int main(int argc, char *argv[])
{
int64_t file_size;
int64_t remain_bytes;
char *filename;
int fd;
int read_bytes;
int result;
int64_t crc32;
char buff[512 * 1024];
if (argc < 2)
{
printf("Usage: %s <filename>\n", argv[0]);
return 1;
}
filename = argv[1];
fd = open(filename, O_RDONLY);
if (fd < 0)
{
printf("file: "__FILE__", line: %d, " \
"open file %s fail, " \
"errno: %d, error info: %s\n", \
__LINE__, filename, errno, STRERROR(errno));
return errno != 0 ? errno : EACCES;
}
if ((file_size=lseek(fd, 0, SEEK_END)) < 0)
{
printf("file: "__FILE__", line: %d, " \
"call lseek fail, " \
"errno: %d, error info: %s\n", \
__LINE__, errno, STRERROR(errno));
close(fd);
return errno;
}
if (lseek(fd, 0, SEEK_SET) < 0)
{
printf("file: "__FILE__", line: %d, " \
"call lseek fail, " \
"errno: %d, error info: %s\n", \
__LINE__, errno, STRERROR(errno));
close(fd);
return errno;
}
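/* read the file in chunks of sizeof(buff) bytes and accumulate the CRC32 */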
crc32 = CRC32_XINIT;
result = 0;
remain_bytes = file_size;
while (remain_bytes > 0)
{
if (remain_bytes > sizeof(buff))
{
read_bytes = sizeof(buff);
}
else
{
read_bytes = remain_bytes;
}
if (read(fd, buff, read_bytes) != read_bytes)
{
printf("file: "__FILE__", line: %d, " \
"call lseek fail, " \
"errno: %d, error info: %s\n", \
__LINE__, errno, STRERROR(errno));
result = errno != 0 ? errno : EIO;
break;
}
crc32 = CRC32_ex(buff, read_bytes, crc32);
remain_bytes -= read_bytes;
}
close(fd);
if (result == 0)
{
crc32 = CRC32_FINAL(crc32);
printf("%x\n", (int)crc32);
}
return result;
}

View File

@ -0,0 +1,62 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fastcommon/logger.h"
int main(int argc, char *argv[])
{
char *conf_filename;
ConnectionInfo *pTrackerServer;
int result;
char file_id[128];
if (argc < 3)
{
printf("Usage: %s <config_file> <file_id>\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_ERR;
ignore_signal_pipe();
conf_filename = argv[1];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
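/* delete the file identified by file_id (group_name/remote_filename) */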
snprintf(file_id, sizeof(file_id), "%s", argv[2]);
if ((result=storage_delete_file1(pTrackerServer, NULL, file_id)) != 0)
{
printf("delete file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}

View File

@ -0,0 +1,97 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fastcommon/logger.h"
int main(int argc, char *argv[])
{
char *conf_filename;
char *local_filename;
ConnectionInfo *pTrackerServer;
int result;
char file_id[128];
int64_t file_size;
int64_t file_offset;
int64_t download_bytes;
if (argc < 3)
{
printf("Usage: %s <config_file> <file_id> " \
"[local_filename] [<download_offset> " \
"<download_bytes>]\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_ERR;
ignore_signal_pipe();
conf_filename = argv[1];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
snprintf(file_id, sizeof(file_id), "%s", argv[2]);
file_offset = 0;
download_bytes = 0;
if (argc >= 4)
{
local_filename = argv[3];
if (argc >= 6)
{
file_offset = strtoll(argv[4], NULL, 10);
download_bytes = strtoll(argv[5], NULL, 10);
}
}
else
{
local_filename = strrchr(file_id, '/');
if (local_filename != NULL)
{
local_filename++; //skip /
}
else
{
local_filename = file_id;
}
}
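/* download by file_id to the local file; offset 0 and download_bytes 0 mean the whole file */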
result = storage_do_download_file1_ex(pTrackerServer, \
NULL, FDFS_DOWNLOAD_TO_FILE, file_id, \
file_offset, download_bytes, \
&local_filename, NULL, &file_size);
if (result != 0)
{
printf("download file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}

View File

@ -0,0 +1,91 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fastcommon/logger.h"
int main(int argc, char *argv[])
{
char *conf_filename;
const char *file_type_str;
char file_id[128];
int result;
FDFSFileInfo file_info;
if (argc < 3)
{
printf("Usage: %s <config_file> <file_id>\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_ERR;
ignore_signal_pipe();
conf_filename = argv[1];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
snprintf(file_id, sizeof(file_id), "%s", argv[2]);
memset(&file_info, 0, sizeof(file_info));
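/* query the file's info (type, size, crc32, timestamps) from its file_id */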
result = fdfs_get_allonline_file_info_ex1(file_id, true, &file_info);
if (result != 0)
{
fprintf(stderr, "query file info fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
else
{
char szDatetime[32];
switch (file_info.file_type)
{
case FDFS_FILE_TYPE_NORMAL:
file_type_str = "normal";
break;
case FDFS_FILE_TYPE_SLAVE:
file_type_str = "slave";
break;
case FDFS_FILE_TYPE_APPENDER:
file_type_str = "appender";
break;
default:
file_type_str = "unkown";
break;
}
printf("GET FROM SERVER: %s\n\n",
file_info.get_from_server ? "true" : "false");
printf("file type: %s\n", file_type_str);
printf("source storage id: %d\n", file_info.source_id);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file create timestamp: %s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size: %"PRId64"\n", \
file_info.file_size);
printf("file crc32: %d (0x%08x)\n", \
file_info.crc32, file_info.crc32);
}
tracker_close_all_connections();
fdfs_client_destroy();
return 0;
}

View File

@ -0,0 +1,28 @@
tmp_src_filename=_fdfs_check_bits_.c
cat <<EOF > $tmp_src_filename
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
int main()
{
printf("%d\n", (int)sizeof(long));
return 0;
}
EOF
gcc -D_FILE_OFFSET_BITS=64 -o a.out $tmp_src_filename
OS_BITS=`./a.out`
rm $tmp_src_filename a.out
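# when running as root, symlink the client libraries into /usr/lib (and also /usr/lib64 on 64-bit systems, where sizeof(long) is 8 bytes)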
TARGET_LIB="$(TARGET_PREFIX)/lib"
if [ "`id -u`" = "0" ]; then
ln -fs $TARGET_LIB/libfastcommon.so.1 /usr/lib/libfastcommon.so
ln -fs $TARGET_LIB/libfdfsclient.so.1 /usr/lib/libfdfsclient.so
if [ "$OS_BITS" = "8" ]; then
ln -fs $TARGET_LIB/libfastcommon.so.1 /usr/lib64/libfastcommon.so
ln -fs $TARGET_LIB/libfdfsclient.so.1 /usr/lib64/libfdfsclient.so
fi
fi

View File

@ -0,0 +1,601 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <netdb.h>
#include <sys/types.h>
#include "fastcommon/sockopt.h"
#include "fastcommon/logger.h"
#include "client_global.h"
#include "fdfs_global.h"
#include "fdfs_client.h"
static ConnectionInfo *pTrackerServer;
static int list_all_groups(const char *group_name);
static void usage(char *argv[])
{
printf("Usage: %s <config_file> [-h <tracker_server>] "
"[list|delete|set_trunk_server <group_name> [storage_id]]\n"
"\tthe tracker server format: host[:port], "
"the tracker default port is %d\n\n",
argv[0], FDFS_TRACKER_SERVER_DEF_PORT);
}
int main(int argc, char *argv[])
{
char *conf_filename;
int result;
char *op_type;
char *tracker_server;
int arg_index;
char *group_name;
if (argc < 2)
{
usage(argv);
return 1;
}
tracker_server = NULL;
conf_filename = argv[1];
arg_index = 2;
if (arg_index >= argc)
{
op_type = "list";
}
else
{
int len;
len = strlen(argv[arg_index]);
if (len >= 2 && strncmp(argv[arg_index], "-h", 2) == 0)
{
if (len == 2)
{
arg_index++;
if (arg_index >= argc)
{
usage(argv);
return 1;
}
tracker_server = argv[arg_index++];
}
else
{
tracker_server = argv[arg_index] + 2;
arg_index++;
}
if (arg_index < argc)
{
op_type = argv[arg_index++];
}
else
{
op_type = "list";
}
}
else
{
op_type = argv[arg_index++];
}
}
log_init();
g_log_context.log_level = LOG_DEBUG;
ignore_signal_pipe();
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
load_log_level_ex(conf_filename);
if (tracker_server == NULL)
{
if (g_tracker_group.server_count > 1)
{
srand(time(NULL));
rand(); //discard the first
g_tracker_group.server_index = (int)( \
(g_tracker_group.server_count * (double)rand()) \
/ (double)RAND_MAX);
}
}
else
{
int i;
ConnectionInfo conn;
if ((result=conn_pool_parse_server_info(tracker_server, &conn,
FDFS_TRACKER_SERVER_DEF_PORT)) != 0)
{
printf("resolve ip address of tracker server: %s "
"fail!, error info: %s\n", tracker_server, hstrerror(h_errno));
return result;
}
for (i=0; i<g_tracker_group.server_count; i++)
{
if (fdfs_server_contain1(g_tracker_group.servers + i, &conn))
{
fdfs_set_server_info_index1(g_tracker_group.servers + i, &conn);
g_tracker_group.server_index = i;
break;
}
}
if (i == g_tracker_group.server_count)
{
printf("tracker server: %s not exists!\n", tracker_server);
return 2;
}
}
printf("server_count=%d, server_index=%d\n",
g_tracker_group.server_count, g_tracker_group.server_index);
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
printf("\ntracker server is %s:%d\n\n", pTrackerServer->ip_addr, pTrackerServer->port);
if (arg_index < argc)
{
group_name = argv[arg_index++];
}
else
{
group_name = NULL;
}
if (strcmp(op_type, "list") == 0)
{
if (group_name == NULL)
{
result = list_all_groups(NULL);
}
else
{
result = list_all_groups(group_name);
}
}
else if (strcmp(op_type, "delete") == 0)
{
if (arg_index >= argc)
{
if ((result=tracker_delete_group(&g_tracker_group, \
group_name)) == 0)
{
printf("delete group: %s success\n", \
group_name);
}
else
{
printf("delete group: %s fail, " \
"error no: %d, error info: %s\n", \
group_name, result, STRERROR(result));
}
}
else
{
char *storage_id;
storage_id = argv[arg_index++];
if ((result=tracker_delete_storage(&g_tracker_group, \
group_name, storage_id)) == 0)
{
printf("delete storage server %s::%s success\n", \
group_name, storage_id);
}
else
{
printf("delete storage server %s::%s fail, " \
"error no: %d, error info: %s\n", \
group_name, storage_id, \
result, STRERROR(result));
}
}
}
else if (strcmp(op_type, "set_trunk_server") == 0)
{
char *storage_id;
char new_trunk_server_id[FDFS_STORAGE_ID_MAX_SIZE];
if (group_name == NULL)
{
usage(argv);
return 1;
}
if (arg_index >= argc)
{
storage_id = "";
}
else
{
storage_id = argv[arg_index++];
}
if ((result=tracker_set_trunk_server(&g_tracker_group, \
group_name, storage_id, new_trunk_server_id)) == 0)
{
printf("set trunk server %s::%s success, " \
"new trunk server: %s\n", group_name, \
storage_id, new_trunk_server_id);
}
else
{
printf("set trunk server %s::%s fail, " \
"error no: %d, error info: %s\n", \
group_name, storage_id, \
result, STRERROR(result));
}
}
else
{
printf("Invalid command %s\n\n", op_type);
usage(argv);
}
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return 0;
}
static int list_storages(FDFSGroupStat *pGroupStat)
{
int result;
int storage_count;
FDFSStorageInfo storage_infos[FDFS_MAX_SERVERS_EACH_GROUP];
FDFSStorageInfo *p;
FDFSStorageInfo *pStorage;
FDFSStorageInfo *pStorageEnd;
FDFSStorageStat *pStorageStat;
char szJoinTime[32];
char szUpTime[32];
char szLastHeartBeatTime[32];
char szSrcUpdTime[32];
char szSyncUpdTime[32];
char szSyncedTimestamp[32];
char szSyncedDelaySeconds[128];
char szHostname[128];
char szHostnamePrompt[128+8];
char szDiskTotalSpace[32];
char szDiskFreeSpace[32];
char szTrunkSpace[32];
int k;
int max_last_source_update;
printf( "group name = %s\n"
"disk total space = %s MB\n"
"disk free space = %s MB\n"
"trunk free space = %s MB\n"
"storage server count = %d\n"
"active server count = %d\n"
"storage server port = %d\n"
"storage HTTP port = %d\n"
"store path count = %d\n"
"subdir count per path = %d\n"
"current write server index = %d\n"
"current trunk file id = %d\n\n",
pGroupStat->group_name,
long_to_comma_str(pGroupStat->total_mb, szDiskTotalSpace),
long_to_comma_str(pGroupStat->free_mb, szDiskFreeSpace),
long_to_comma_str(pGroupStat->trunk_free_mb, szTrunkSpace),
pGroupStat->count,
pGroupStat->active_count,
pGroupStat->storage_port,
pGroupStat->storage_http_port,
pGroupStat->store_path_count,
pGroupStat->subdir_count_per_path,
pGroupStat->current_write_server,
pGroupStat->current_trunk_file_id
);
result = tracker_list_servers(pTrackerServer, \
pGroupStat->group_name, NULL, \
storage_infos, FDFS_MAX_SERVERS_EACH_GROUP, \
&storage_count);
if (result != 0)
{
return result;
}
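/* print statistics for each storage server; the sync delay is measured against the newest last_source_update among the other servers in the group */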
k = 0;
pStorageEnd = storage_infos + storage_count;
for (pStorage=storage_infos; pStorage<pStorageEnd; \
pStorage++)
{
max_last_source_update = 0;
for (p=storage_infos; p<pStorageEnd; p++)
{
if (p != pStorage && p->stat.last_source_update
> max_last_source_update)
{
max_last_source_update = \
p->stat.last_source_update;
}
}
pStorageStat = &(pStorage->stat);
if (max_last_source_update == 0)
{
*szSyncedDelaySeconds = '\0';
}
else
{
if (pStorageStat->last_synced_timestamp == 0)
{
strcpy(szSyncedDelaySeconds, "(never synced)");
}
else
{
int delay_seconds;
int remain_seconds;
int day;
int hour;
int minute;
int second;
char szDelayTime[64];
delay_seconds = (int)(max_last_source_update -
pStorageStat->last_synced_timestamp);
if (delay_seconds < 0)
{
delay_seconds = 0;
}
day = delay_seconds / (24 * 3600);
remain_seconds = delay_seconds % (24 * 3600);
hour = remain_seconds / 3600;
remain_seconds %= 3600;
minute = remain_seconds / 60;
second = remain_seconds % 60;
if (day != 0)
{
sprintf(szDelayTime, "%d days " \
"%02dh:%02dm:%02ds", \
day, hour, minute, second);
}
else if (hour != 0)
{
sprintf(szDelayTime, "%02dh:%02dm:%02ds", \
hour, minute, second);
}
else if (minute != 0)
{
sprintf(szDelayTime, "%02dm:%02ds", minute, second);
}
else
{
sprintf(szDelayTime, "%ds", second);
}
sprintf(szSyncedDelaySeconds, "(%s delay)", szDelayTime);
}
}
//getHostnameByIp(pStorage->ip_addr, szHostname, sizeof(szHostname));
*szHostname = '\0';
if (*szHostname != '\0')
{
sprintf(szHostnamePrompt, " (%s)", szHostname);
}
else
{
*szHostnamePrompt = '\0';
}
if (pStorage->up_time != 0)
{
formatDatetime(pStorage->up_time, \
"%Y-%m-%d %H:%M:%S", \
szUpTime, sizeof(szUpTime));
}
else
{
*szUpTime = '\0';
}
printf( "\tStorage %d:\n"
"\t\tid = %s\n"
"\t\tip_addr = %s%s %s\n"
"\t\thttp domain = %s\n"
"\t\tversion = %s\n"
"\t\tjoin time = %s\n"
"\t\tup time = %s\n"
"\t\ttotal storage = %s MB\n"
"\t\tfree storage = %s MB\n"
"\t\tupload priority = %d\n"
"\t\tstore_path_count = %d\n"
"\t\tsubdir_count_per_path = %d\n"
"\t\tstorage_port = %d\n"
"\t\tstorage_http_port = %d\n"
"\t\tcurrent_write_path = %d\n"
"\t\tsource storage id = %s\n"
"\t\tif_trunk_server = %d\n"
"\t\tconnection.alloc_count = %d\n"
"\t\tconnection.current_count = %d\n"
"\t\tconnection.max_count = %d\n"
"\t\ttotal_upload_count = %"PRId64"\n"
"\t\tsuccess_upload_count = %"PRId64"\n"
"\t\ttotal_append_count = %"PRId64"\n"
"\t\tsuccess_append_count = %"PRId64"\n"
"\t\ttotal_modify_count = %"PRId64"\n"
"\t\tsuccess_modify_count = %"PRId64"\n"
"\t\ttotal_truncate_count = %"PRId64"\n"
"\t\tsuccess_truncate_count = %"PRId64"\n"
"\t\ttotal_set_meta_count = %"PRId64"\n"
"\t\tsuccess_set_meta_count = %"PRId64"\n"
"\t\ttotal_delete_count = %"PRId64"\n"
"\t\tsuccess_delete_count = %"PRId64"\n"
"\t\ttotal_download_count = %"PRId64"\n"
"\t\tsuccess_download_count = %"PRId64"\n"
"\t\ttotal_get_meta_count = %"PRId64"\n"
"\t\tsuccess_get_meta_count = %"PRId64"\n"
"\t\ttotal_create_link_count = %"PRId64"\n"
"\t\tsuccess_create_link_count = %"PRId64"\n"
"\t\ttotal_delete_link_count = %"PRId64"\n"
"\t\tsuccess_delete_link_count = %"PRId64"\n"
"\t\ttotal_upload_bytes = %"PRId64"\n"
"\t\tsuccess_upload_bytes = %"PRId64"\n"
"\t\ttotal_append_bytes = %"PRId64"\n"
"\t\tsuccess_append_bytes = %"PRId64"\n"
"\t\ttotal_modify_bytes = %"PRId64"\n"
"\t\tsuccess_modify_bytes = %"PRId64"\n"
"\t\tstotal_download_bytes = %"PRId64"\n"
"\t\tsuccess_download_bytes = %"PRId64"\n"
"\t\ttotal_sync_in_bytes = %"PRId64"\n"
"\t\tsuccess_sync_in_bytes = %"PRId64"\n"
"\t\ttotal_sync_out_bytes = %"PRId64"\n"
"\t\tsuccess_sync_out_bytes = %"PRId64"\n"
"\t\ttotal_file_open_count = %"PRId64"\n"
"\t\tsuccess_file_open_count = %"PRId64"\n"
"\t\ttotal_file_read_count = %"PRId64"\n"
"\t\tsuccess_file_read_count = %"PRId64"\n"
"\t\ttotal_file_write_count = %"PRId64"\n"
"\t\tsuccess_file_write_count = %"PRId64"\n"
"\t\tlast_heart_beat_time = %s\n"
"\t\tlast_source_update = %s\n"
"\t\tlast_sync_update = %s\n"
"\t\tlast_synced_timestamp = %s %s\n",
++k, pStorage->id, pStorage->ip_addr,
szHostnamePrompt, get_storage_status_caption(
pStorage->status), pStorage->domain_name,
pStorage->version,
formatDatetime(pStorage->join_time,
"%Y-%m-%d %H:%M:%S",
szJoinTime, sizeof(szJoinTime)), szUpTime,
long_to_comma_str(pStorage->total_mb, szDiskTotalSpace),
long_to_comma_str(pStorage->free_mb, szDiskFreeSpace),
pStorage->upload_priority,
pStorage->store_path_count,
pStorage->subdir_count_per_path,
pStorage->storage_port,
pStorage->storage_http_port,
pStorage->current_write_path,
pStorage->src_id,
pStorage->if_trunk_server,
pStorageStat->connection.alloc_count,
pStorageStat->connection.current_count,
pStorageStat->connection.max_count,
pStorageStat->total_upload_count,
pStorageStat->success_upload_count,
pStorageStat->total_append_count,
pStorageStat->success_append_count,
pStorageStat->total_modify_count,
pStorageStat->success_modify_count,
pStorageStat->total_truncate_count,
pStorageStat->success_truncate_count,
pStorageStat->total_set_meta_count,
pStorageStat->success_set_meta_count,
pStorageStat->total_delete_count,
pStorageStat->success_delete_count,
pStorageStat->total_download_count,
pStorageStat->success_download_count,
pStorageStat->total_get_meta_count,
pStorageStat->success_get_meta_count,
pStorageStat->total_create_link_count,
pStorageStat->success_create_link_count,
pStorageStat->total_delete_link_count,
pStorageStat->success_delete_link_count,
pStorageStat->total_upload_bytes,
pStorageStat->success_upload_bytes,
pStorageStat->total_append_bytes,
pStorageStat->success_append_bytes,
pStorageStat->total_modify_bytes,
pStorageStat->success_modify_bytes,
pStorageStat->total_download_bytes,
pStorageStat->success_download_bytes,
pStorageStat->total_sync_in_bytes,
pStorageStat->success_sync_in_bytes,
pStorageStat->total_sync_out_bytes,
pStorageStat->success_sync_out_bytes,
pStorageStat->total_file_open_count,
pStorageStat->success_file_open_count,
pStorageStat->total_file_read_count,
pStorageStat->success_file_read_count,
pStorageStat->total_file_write_count,
pStorageStat->success_file_write_count,
formatDatetime(pStorageStat->last_heart_beat_time,
"%Y-%m-%d %H:%M:%S",
szLastHeartBeatTime, sizeof(szLastHeartBeatTime)),
formatDatetime(pStorageStat->last_source_update,
"%Y-%m-%d %H:%M:%S",
szSrcUpdTime, sizeof(szSrcUpdTime)),
formatDatetime(pStorageStat->last_sync_update,
"%Y-%m-%d %H:%M:%S",
szSyncUpdTime, sizeof(szSyncUpdTime)),
formatDatetime(pStorageStat->last_synced_timestamp,
"%Y-%m-%d %H:%M:%S",
szSyncedTimestamp, sizeof(szSyncedTimestamp)),
szSyncedDelaySeconds);
}
return 0;
}
static int list_all_groups(const char *group_name)
{
int result;
int group_count;
FDFSGroupStat group_stats[FDFS_MAX_GROUPS];
FDFSGroupStat *pGroupStat;
FDFSGroupStat *pGroupEnd;
int i;
result = tracker_list_groups(pTrackerServer, \
group_stats, FDFS_MAX_GROUPS, \
&group_count);
if (result != 0)
{
tracker_close_all_connections();
fdfs_client_destroy();
return result;
}
pGroupEnd = group_stats + group_count;
if (group_name == NULL)
{
printf("group count: %d\n", group_count);
i = 0;
for (pGroupStat=group_stats; pGroupStat<pGroupEnd; \
pGroupStat++)
{
printf( "\nGroup %d:\n", ++i);
list_storages(pGroupStat);
}
}
else
{
for (pGroupStat=group_stats; pGroupStat<pGroupEnd; \
pGroupStat++)
{
if (strcmp(pGroupStat->group_name, group_name) == 0)
{
list_storages(pGroupStat);
break;
}
}
}
return 0;
}

View File

@ -0,0 +1,68 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fastcommon/logger.h"
int main(int argc, char *argv[])
{
char *conf_filename;
ConnectionInfo *pTrackerServer;
int result;
char appender_file_id[128];
char new_file_id[128];
if (argc < 3)
{
fprintf(stderr, "regenerate filename for the appender file.\n"
"NOTE: the regenerated file will be a normal file!\n"
"Usage: %s <config_file> <appender_file_id>\n",
argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_ERR;
conf_filename = argv[1];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
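/* convert the appender file into a normal file; the server returns the new file id */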
snprintf(appender_file_id, sizeof(appender_file_id), "%s", argv[2]);
if ((result=storage_regenerate_appender_filename1(pTrackerServer,
NULL, appender_file_id, new_file_id)) != 0)
{
fprintf(stderr, "regenerate file %s fail, "
"error no: %d, error info: %s\n",
appender_file_id, result, STRERROR(result));
return result;
}
printf("%s\n", new_file_id);
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}

692
JCEC-fastdfs/client/fdfs_test.c Executable file
View File

@ -0,0 +1,692 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fdfs_global.h"
#include "fastcommon/base64.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/logger.h"
#include "fdfs_http_shared.h"
int writeToFileCallback(void *arg, const int64_t file_size, const char *data, \
const int current_size) //two packets at a time; called multiple times to complete the write
{
if (arg == NULL)
{
return EINVAL;
}
if (fwrite(data, current_size, 1, (FILE *)arg) != 1)
{
return errno != 0 ? errno : EIO;
}
return 0;
}
int uploadFileCallback(void *arg, const int64_t file_size, int sock)
{
int64_t total_send_bytes;
char *filename;
if (arg == NULL)
{
return EINVAL;
}
filename = (char *)arg;
return tcpsendfile(sock, filename, file_size, \
g_fdfs_network_timeout, &total_send_bytes);//tcpsenddata_blockqueue
}
int main(int argc, char *argv[])
{
char *conf_filename;
char *local_filename;
ConnectionInfo *pTrackerServer;
ConnectionInfo *pStorageServer;
int result;
ConnectionInfo storageServer;
char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
char remote_filename[256];
char master_filename[256];
FDFSMetaData meta_list[32];
int meta_count;
int i;
FDFSMetaData *pMetaList;
char token[32 + 1];
char file_id[128];
char file_url[256];
char szDatetime[20];
char szPortPart[16];
int url_len;
time_t ts;
char *file_buff;
int64_t file_size;
char *operation;
char *meta_buff;
int store_path_index;
FDFSFileInfo file_info;
printf("This is FastDFS client test program v%d.%02d\n" \
"\nCopyright (C) 2008, Happy Fish / YuQing\n" \
"\nFastDFS may be copied only under the terms of the GNU General\n" \
"Public License V3, which may be found in the FastDFS source kit.\n" \
"Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
"for more detail.\n\n" \
, g_fdfs_version.major, g_fdfs_version.minor);
if (argc < 3)
{
printf("Usage: %s <config_file> <operation>\n" \
"\toperation: upload, download, getmeta, setmeta, " \
"delete and query_servers\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_DEBUG;
conf_filename = argv[1];
operation = argv[2];
if ((result=fdfs_client_init(conf_filename)) != 0) //bh: initialize the client
{
return result;
}
pTrackerServer = tracker_get_connection(); //bh: connect to a tracker server
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
pStorageServer = NULL;
*group_name = '\0';
local_filename = NULL;
if (strcmp(operation, "upload") == 0)
{
int upload_type;
char *prefix_name;
const char *file_ext_name;
char slave_filename[256];
int slave_filename_len;
if (argc < 4)
{
printf("Usage: %s <config_file> upload " \
"<local_filename> [FILE | BUFF | CALLBACK] \n",\
argv[0]);
fdfs_client_destroy();
return EINVAL;
}
local_filename = argv[3];
if (argc == 4)
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
else
{
if (strcmp(argv[4], "BUFF") == 0)
{
upload_type = FDFS_UPLOAD_BY_BUFF;
}
else if (strcmp(argv[4], "CALLBACK") == 0)
{
upload_type = FDFS_UPLOAD_BY_CALLBACK;
}
else
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
}
store_path_index = 0;
{
ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
ConnectionInfo *pServer;
ConnectionInfo *pServerEnd;
int storage_count;
if ((result=tracker_query_storage_store_list_without_group( \
pTrackerServer, storageServers, \
FDFS_MAX_SERVERS_EACH_GROUP, &storage_count, \
group_name, &store_path_index)) == 0)
{
printf("tracker_query_storage_store_list_without_group: \n");
pServerEnd = storageServers + storage_count;
for (pServer=storageServers; pServer<pServerEnd; pServer++)
{
printf("\tserver %d. group_name=%s, " \
"ip_addr=%s, port=%d\n", \
(int)(pServer - storageServers) + 1, \
group_name, pServer->ip_addr, pServer->port);
}
printf("\n");
}
}
if ((result=tracker_query_storage_store(pTrackerServer, \
&storageServer, group_name, &store_path_index)) != 0)
{
fdfs_client_destroy();
printf("tracker_query_storage fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
return result;
}
printf("group_name=%s, ip_addr=%s, port=%d\n", \
group_name, storageServer.ip_addr, \
storageServer.port);
if ((pStorageServer=tracker_make_connection(&storageServer, \
&result)) == NULL)
{
fdfs_client_destroy();
return result;
}
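/* attach some sample metadata to the upload */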
memset(&meta_list, 0, sizeof(meta_list));
meta_count = 0;
strcpy(meta_list[meta_count].name, "ext_name");
strcpy(meta_list[meta_count].value, "jpg");
meta_count++;
strcpy(meta_list[meta_count].name, "width");
strcpy(meta_list[meta_count].value, "160");
meta_count++;
strcpy(meta_list[meta_count].name, "height");
strcpy(meta_list[meta_count].value, "80");
meta_count++;
strcpy(meta_list[meta_count].name, "file_size");
strcpy(meta_list[meta_count].value, "115120");
meta_count++;
file_ext_name = fdfs_get_file_ext_name(local_filename); //get the filename extension
*group_name = '\0';
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
result = storage_upload_by_filename(pTrackerServer, \
pStorageServer, store_path_index, \
local_filename, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
printf("storage_upload_by_filename\n");
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_upload_by_filebuff(pTrackerServer, \
pStorageServer, store_path_index, \
file_content, file_size, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
free(file_content);
}
printf("storage_upload_by_filebuff\n");
}
else
{
struct stat stat_buf;
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_by_callback(pTrackerServer, \
pStorageServer, store_path_index, \
uploadFileCallback, local_filename, \
file_size, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
}
printf("storage_upload_by_callback\n");
}
if (result != 0)
{
printf("upload file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
if (g_tracker_server_http_port == 80)
{
*szPortPart = '\0';
}
else
{
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
}
sprintf(file_id, "%s/%s", group_name, remote_filename);
url_len = sprintf(file_url, "http://%s%s/%s", \
pStorageServer->ip_addr, szPortPart, file_id);
if (g_anti_steal_token)
{
ts = time(NULL);
fdfs_http_gen_token(&g_anti_steal_secret_key, file_id, \
ts, token);
sprintf(file_url + url_len, "?token=%s&ts=%d", \
token, (int)ts);
}
printf("group_name=%s, remote_filename=%s\n", \
group_name, remote_filename);
fdfs_get_file_info(group_name, remote_filename, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("example file url: %s\n", file_url);
strcpy(master_filename, remote_filename);
*remote_filename = '\0';
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
prefix_name = "_big";
result = storage_upload_slave_by_filename(pTrackerServer,
NULL, local_filename, master_filename, \
prefix_name, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
printf("storage_upload_slave_by_filename\n");
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
prefix_name = "1024x1024";
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_upload_slave_by_filebuff(pTrackerServer, \
NULL, file_content, file_size, master_filename,
prefix_name, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
free(file_content);
}
printf("storage_upload_slave_by_filebuff\n");
}
else
{
struct stat stat_buf;
prefix_name = "-small";
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_slave_by_callback(pTrackerServer, \
NULL, uploadFileCallback, local_filename, \
file_size, master_filename, prefix_name, \
file_ext_name, meta_list, meta_count, \
group_name, remote_filename);
}
printf("storage_upload_slave_by_callback\n");
}
if (result != 0)
{
printf("upload slave file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
if (g_tracker_server_http_port == 80)
{
*szPortPart = '\0';
}
else
{
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
}
sprintf(file_id, "%s/%s", group_name, remote_filename);
url_len = sprintf(file_url, "http://%s%s/%s", \
pStorageServer->ip_addr, szPortPart, file_id);
if (g_anti_steal_token)
{
ts = time(NULL);
fdfs_http_gen_token(&g_anti_steal_secret_key, file_id, \
ts, token);
sprintf(file_url + url_len, "?token=%s&ts=%d", \
token, (int)ts);
}
printf("group_name=%s, remote_filename=%s\n", \
group_name, remote_filename);
fdfs_get_file_info(group_name, remote_filename, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("example file url: %s\n", file_url);
if (fdfs_gen_slave_filename(master_filename, \
prefix_name, file_ext_name, \
slave_filename, &slave_filename_len) == 0)
{
if (strcmp(remote_filename, slave_filename) != 0)
{
printf("slave_filename=%s\n" \
"remote_filename=%s\n" \
"not equal!\n", \
slave_filename, remote_filename);
}
}
}
else if (strcmp(operation, "download") == 0 ||
strcmp(operation, "getmeta") == 0 ||
strcmp(operation, "setmeta") == 0 ||
strcmp(operation, "query_servers") == 0 ||
strcmp(operation, "delete") == 0)
{
if (argc < 5)
{
printf("Usage: %s <config_file> %s " \
"<group_name> <remote_filename>\n", \
argv[0], operation);
fdfs_client_destroy();
return EINVAL;
}
snprintf(group_name, sizeof(group_name), "%s", argv[3]);
snprintf(remote_filename, sizeof(remote_filename), \
"%s", argv[4]);
if (strcmp(operation, "setmeta") == 0 ||
strcmp(operation, "delete") == 0)
{
result = tracker_query_storage_update(pTrackerServer, \
&storageServer, group_name, remote_filename);
}
else if (strcmp(operation, "query_servers") == 0)
{
ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
int server_count;
result = tracker_query_storage_list(pTrackerServer, \
storageServers, FDFS_MAX_SERVERS_EACH_GROUP, \
&server_count, group_name, remote_filename);
if (result != 0)
{
printf("tracker_query_storage_list fail, "\
"group_name=%s, filename=%s, " \
"error no: %d, error info: %s\n", \
group_name, remote_filename, \
result, STRERROR(result));
}
else
{
printf("server list (%d):\n", server_count);
for (i=0; i<server_count; i++)
{
printf("\t%s:%d\n", \
storageServers[i].ip_addr, \
storageServers[i].port);
}
printf("\n");
}
tracker_close_connection_ex(pTrackerServer, result != 0);
fdfs_client_destroy();
return result;
}
else
{
result = tracker_query_storage_fetch(pTrackerServer, \
&storageServer, group_name, remote_filename);
}
if (result != 0)
{
fdfs_client_destroy();
printf("tracker_query_storage_fetch fail, " \
"group_name=%s, filename=%s, " \
"error no: %d, error info: %s\n", \
group_name, remote_filename, \
result, STRERROR(result));
return result;
}
printf("storage=%s:%d\n", storageServer.ip_addr, \
storageServer.port);
if ((pStorageServer=tracker_make_connection(&storageServer, \
&result)) == NULL)
{
fdfs_client_destroy();
return result;
}
if (strcmp(operation, "download") == 0)
{
if (argc >= 6)
{
local_filename = argv[5];
if (strcmp(local_filename, "CALLBACK") == 0)
{
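/* when the local filename argument is "CALLBACK", exercise the callback download API; data is written through writeToFileCallback into a file literally named "CALLBACK" */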
FILE *fp;
fp = fopen(local_filename, "wb");
if (fp == NULL)
{
result = errno != 0 ? errno : EPERM;
printf("open file \"%s\" fail, " \
"errno: %d, error info: %s", \
local_filename, result, \
STRERROR(result));
}
else
{
result = storage_download_file_ex( \
pTrackerServer, pStorageServer, \
group_name, remote_filename, 0, 0, \
writeToFileCallback, fp, &file_size);
fclose(fp);
}
}
else
{
result = storage_download_file_to_file( \
pTrackerServer, pStorageServer, \
group_name, remote_filename, \
local_filename, &file_size);
}
}
else
{
file_buff = NULL;
if ((result=storage_download_file_to_buff( \
pTrackerServer, pStorageServer, \
group_name, remote_filename, \
&file_buff, &file_size)) == 0)
{
local_filename = strrchr( \
remote_filename, '/');
if (local_filename != NULL)
{
local_filename++; //skip /
}
else
{
local_filename=remote_filename;
}
result = writeToFile(local_filename, \
file_buff, file_size);
free(file_buff);
}
}
if (result == 0)
{
printf("download file success, " \
"file size=%"PRId64", file save to %s\n", \
file_size, local_filename);
}
else
{
printf("download file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
else if (strcmp(operation, "getmeta") == 0)
{
if ((result=storage_get_metadata(pTrackerServer, \
pStorageServer, group_name, remote_filename, \
&pMetaList, &meta_count)) == 0)
{
printf("get meta data success, " \
"meta count=%d\n", meta_count);
for (i=0; i<meta_count; i++)
{
printf("%s=%s\n", \
pMetaList[i].name, \
pMetaList[i].value);
}
free(pMetaList);
}
else
{
printf("getmeta fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
else if (strcmp(operation, "setmeta") == 0)
{
if (argc < 7)
{
printf("Usage: %s <config_file> %s " \
"<group_name> <remote_filename> " \
"<op_flag> <metadata_list>\n" \
"\top_flag: %c for overwrite, " \
"%c for merge\n" \
"\tmetadata_list: name1=value1," \
"name2=value2,...\n", \
argv[0], operation, \
STORAGE_SET_METADATA_FLAG_OVERWRITE, \
STORAGE_SET_METADATA_FLAG_MERGE);
fdfs_client_destroy();
return EINVAL;
}
meta_buff = strdup(argv[6]);
if (meta_buff == NULL)
{
printf("Out of memory!\n");
return ENOMEM;
}
pMetaList = fdfs_split_metadata_ex(meta_buff, \
',', '=', &meta_count, &result);
if (pMetaList == NULL)
{
printf("Out of memory!\n");
free(meta_buff);
return ENOMEM;
}
if ((result=storage_set_metadata(pTrackerServer, \
NULL, group_name, remote_filename, \
pMetaList, meta_count, *argv[5])) == 0)
{
printf("set meta data success\n");
}
else
{
printf("setmeta fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
free(meta_buff);
free(pMetaList);
}
else if(strcmp(operation, "delete") == 0)
{
if ((result=storage_delete_file(pTrackerServer, \
NULL, group_name, remote_filename)) == 0)
{
printf("delete file success\n");
}
else
{
printf("delete file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
}
else
{
fdfs_client_destroy();
printf("invalid operation: %s\n", operation);
return EINVAL;
}
/* for test only */
if ((result=fdfs_active_test(pTrackerServer)) != 0)
{
printf("active_test to tracker server %s:%d fail, errno: %d\n", \
pTrackerServer->ip_addr, pTrackerServer->port, result);
}
/* for test only */
if ((result=fdfs_active_test(pStorageServer)) != 0)
{
printf("active_test to storage server %s:%d fail, errno: %d\n", \
pStorageServer->ip_addr, pStorageServer->port, result);
}
tracker_close_connection_ex(pStorageServer, true);
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}

658
JCEC-fastdfs/client/fdfs_test1.c Executable file
View File

@ -0,0 +1,658 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fdfs_global.h"
#include "fastcommon/base64.h"
#include "fdfs_http_shared.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/logger.h"
int writeToFileCallback(void *arg, const int64_t file_size, const char *data, \
const int current_size)
{
if (arg == NULL)
{
return EINVAL;
}
if (fwrite(data, current_size, 1, (FILE *)arg) != 1)
{
return errno != 0 ? errno : EIO;
}
return 0;
}
int uploadFileCallback(void *arg, const int64_t file_size, int sock)
{
int64_t total_send_bytes;
char *filename;
if (arg == NULL)
{
return EINVAL;
}
filename = (char *)arg;
return tcpsendfile(sock, filename, file_size, \
g_fdfs_network_timeout, &total_send_bytes);
}
int main(int argc, char *argv[])
{
char *conf_filename;
char *local_filename;
ConnectionInfo *pTrackerServer;
ConnectionInfo *pStorageServer;
int result;
ConnectionInfo storageServer;
char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
FDFSMetaData meta_list[32];
int meta_count;
int i;
FDFSMetaData *pMetaList;
char token[32 + 1];
char file_id[128];
char master_file_id[128];
char file_url[256];
char szDatetime[20];
char szPortPart[16];
int url_len;
time_t ts;
char *file_buff;
int64_t file_size;
char *operation;
char *meta_buff;
int store_path_index;
FDFSFileInfo file_info;
printf("This is FastDFS client test program v%d.%02d\n" \
"\nCopyright (C) 2008, Happy Fish / YuQing\n" \
"\nFastDFS may be copied only under the terms of the GNU General\n" \
"Public License V3, which may be found in the FastDFS source kit.\n" \
"Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
"for more detail.\n\n" \
, g_fdfs_version.major, g_fdfs_version.minor);
if (argc < 3)
{
printf("Usage: %s <config_file> <operation>\n" \
"\toperation: upload, download, getmeta, setmeta, " \
"delete and query_servers\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_DEBUG;
conf_filename = argv[1];
operation = argv[2];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
local_filename = NULL;
if (strcmp(operation, "upload") == 0)
{
int upload_type;
char *prefix_name;
const char *file_ext_name;
char slave_file_id[256];
int slave_file_id_len;
if (argc < 4)
{
printf("Usage: %s <config_file> upload " \
"<local_filename> [FILE | BUFF | CALLBACK] \n",\
argv[0]);
fdfs_client_destroy();
return EINVAL;
}
local_filename = argv[3];
if (argc == 4)
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
else
{
if (strcmp(argv[4], "BUFF") == 0)
{
upload_type = FDFS_UPLOAD_BY_BUFF;
}
else if (strcmp(argv[4], "CALLBACK") == 0)
{
upload_type = FDFS_UPLOAD_BY_CALLBACK;
}
else
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
}
{
ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
ConnectionInfo *pServer;
ConnectionInfo *pServerEnd;
int storage_count;
strcpy(group_name, "group1");
if ((result=tracker_query_storage_store_list_with_group( \
pTrackerServer, group_name, storageServers, \
FDFS_MAX_SERVERS_EACH_GROUP, &storage_count, \
&store_path_index)) == 0)
{
printf("tracker_query_storage_store_list_with_group: \n");
pServerEnd = storageServers + storage_count;
for (pServer=storageServers; pServer<pServerEnd; pServer++)
{
printf("\tserver %d. group_name=%s, " \
"ip_addr=%s, port=%d\n", \
(int)(pServer - storageServers) + 1, \
group_name, pServer->ip_addr, \
pServer->port);
}
printf("\n");
}
}
*group_name = '\0';
if ((result=tracker_query_storage_store(pTrackerServer, \
&storageServer, group_name, &store_path_index)) != 0)
{
fdfs_client_destroy();
printf("tracker_query_storage fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
return result;
}
printf("group_name=%s, ip_addr=%s, port=%d\n", \
group_name, storageServer.ip_addr, \
storageServer.port);
if ((pStorageServer=tracker_make_connection(&storageServer, \
&result)) == NULL)
{
fdfs_client_destroy();
return result;
}
memset(&meta_list, 0, sizeof(meta_list));
meta_count = 0;
strcpy(meta_list[meta_count].name, "ext_name");
strcpy(meta_list[meta_count].value, "jpg");
meta_count++;
strcpy(meta_list[meta_count].name, "width");
strcpy(meta_list[meta_count].value, "160");
meta_count++;
strcpy(meta_list[meta_count].name, "height");
strcpy(meta_list[meta_count].value, "80");
meta_count++;
strcpy(meta_list[meta_count].name, "file_size");
strcpy(meta_list[meta_count].value, "115120");
meta_count++;
file_ext_name = fdfs_get_file_ext_name(local_filename);
strcpy(group_name, "");
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
printf("storage_upload_by_filename\n");
result = storage_upload_by_filename1(pTrackerServer, \
pStorageServer, store_path_index, \
local_filename, file_ext_name, \
meta_list, meta_count, \
group_name, file_id);
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
printf("storage_upload_by_filebuff\n");
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_upload_by_filebuff1(pTrackerServer, \
pStorageServer, store_path_index, \
file_content, file_size, file_ext_name, \
meta_list, meta_count, \
group_name, file_id);
free(file_content);
}
}
else
{
struct stat stat_buf;
printf("storage_upload_by_callback\n");
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_by_callback1(pTrackerServer, \
pStorageServer, store_path_index, \
uploadFileCallback, local_filename, \
file_size, file_ext_name, \
meta_list, meta_count, \
group_name, file_id);
}
}
if (result != 0)
{
printf("upload file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
if (g_tracker_server_http_port == 80)
{
*szPortPart = '\0';
}
else
{
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
}
url_len = sprintf(file_url, "http://%s%s/%s", \
pStorageServer->ip_addr, szPortPart, file_id);
if (g_anti_steal_token)
{
ts = time(NULL);
fdfs_http_gen_token(&g_anti_steal_secret_key, \
file_id, ts, token);
sprintf(file_url + url_len, "?token=%s&ts=%d", \
token, (int)ts);
}
fdfs_get_file_info1(file_id, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("example file url: %s\n", file_url);
strcpy(master_file_id, file_id);
*file_id = '\0';
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
prefix_name = "_big";
printf("storage_upload_slave_by_filename\n");
result = storage_upload_slave_by_filename1( \
pTrackerServer, NULL, \
local_filename, master_file_id, \
prefix_name, file_ext_name, \
meta_list, meta_count, file_id);
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
prefix_name = "1024x1024";
printf("storage_upload_slave_by_filebuff\n");
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_upload_slave_by_filebuff1( \
pTrackerServer, NULL, file_content, file_size, \
master_file_id, prefix_name, file_ext_name, \
meta_list, meta_count, file_id);
free(file_content);
}
}
else
{
struct stat stat_buf;
prefix_name = "_small";
printf("storage_upload_slave_by_callback\n");
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_slave_by_callback1( \
pTrackerServer, NULL, \
uploadFileCallback, local_filename, \
file_size, master_file_id, \
prefix_name, file_ext_name, \
meta_list, meta_count, file_id);
}
}
if (result != 0)
{
printf("upload slave file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
if (g_tracker_server_http_port == 80)
{
*szPortPart = '\0';
}
else
{
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
}
url_len = sprintf(file_url, "http://%s%s/%s", \
pStorageServer->ip_addr, szPortPart, file_id);
if (g_anti_steal_token)
{
ts = time(NULL);
fdfs_http_gen_token(&g_anti_steal_secret_key, \
file_id, ts, token);
sprintf(file_url + url_len, "?token=%s&ts=%d", \
token, (int)ts);
}
fdfs_get_file_info1(file_id, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("example file url: %s\n", file_url);
if (fdfs_gen_slave_filename(master_file_id, \
prefix_name, file_ext_name, \
slave_file_id, &slave_file_id_len) == 0)
{
if (strcmp(file_id, slave_file_id) != 0)
{
printf("slave_file_id=%s\n" \
"file_id=%s\n" \
"not equal!\n", \
slave_file_id, file_id);
}
}
}
else if (strcmp(operation, "download") == 0 ||
strcmp(operation, "getmeta") == 0 ||
strcmp(operation, "setmeta") == 0 ||
strcmp(operation, "query_servers") == 0 ||
strcmp(operation, "delete") == 0)
{
if (argc < 4)
{
printf("Usage: %s <config_file> %s " \
"<file_id>\n", \
argv[0], operation);
fdfs_client_destroy();
return EINVAL;
}
snprintf(file_id, sizeof(file_id), "%s", argv[3]);
if (strcmp(operation, "query_servers") == 0)
{
ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
int server_count;
result = tracker_query_storage_list1(pTrackerServer, \
storageServers, FDFS_MAX_SERVERS_EACH_GROUP, \
&server_count, file_id);
if (result != 0)
{
printf("tracker_query_storage_list1 fail, "\
"file_id=%s, " \
"error no: %d, error info: %s\n", \
file_id, result, STRERROR(result));
}
else
{
printf("server list (%d):\n", server_count);
for (i=0; i<server_count; i++)
{
printf("\t%s:%d\n", \
storageServers[i].ip_addr, \
storageServers[i].port);
}
printf("\n");
}
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}
if ((result=tracker_query_storage_fetch1(pTrackerServer, \
&storageServer, file_id)) != 0)
{
fdfs_client_destroy();
printf("tracker_query_storage_fetch fail, " \
"file_id=%s, " \
"error no: %d, error info: %s\n", \
file_id, result, STRERROR(result));
return result;
}
printf("storage=%s:%d\n", storageServer.ip_addr, \
storageServer.port);
if ((pStorageServer=tracker_make_connection(&storageServer, \
&result)) == NULL)
{
fdfs_client_destroy();
return result;
}
if (strcmp(operation, "download") == 0)
{
if (argc >= 5)
{
local_filename = argv[4];
if (strcmp(local_filename, "CALLBACK") == 0)
{
FILE *fp;
fp = fopen(local_filename, "wb");
if (fp == NULL)
{
result = errno != 0 ? errno : EPERM;
printf("open file \"%s\" fail, " \
"errno: %d, error info: %s", \
local_filename, result, \
STRERROR(result));
}
else
{
result = storage_download_file_ex1( \
pTrackerServer, pStorageServer, \
file_id, 0, 0, \
writeToFileCallback, fp, &file_size);
fclose(fp);
}
}
else
{
result = storage_download_file_to_file1( \
pTrackerServer, pStorageServer, \
file_id, \
local_filename, &file_size);
}
}
else
{
file_buff = NULL;
if ((result=storage_download_file_to_buff1( \
pTrackerServer, pStorageServer, \
file_id, \
&file_buff, &file_size)) == 0)
{
local_filename = strrchr( \
file_id, '/');
if (local_filename != NULL)
{
local_filename++; //skip /
}
else
{
local_filename=file_id;
}
result = writeToFile(local_filename, \
file_buff, file_size);
free(file_buff);
}
}
if (result == 0)
{
printf("download file success, " \
"file size=%"PRId64", file save to %s\n", \
file_size, local_filename);
}
else
{
printf("download file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
else if (strcmp(operation, "getmeta") == 0)
{
if ((result=storage_get_metadata1(pTrackerServer, \
NULL, file_id, \
&pMetaList, &meta_count)) == 0)
{
printf("get meta data success, " \
"meta count=%d\n", meta_count);
for (i=0; i<meta_count; i++)
{
printf("%s=%s\n", \
pMetaList[i].name, \
pMetaList[i].value);
}
free(pMetaList);
}
else
{
printf("getmeta fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
else if (strcmp(operation, "setmeta") == 0)
{
if (argc < 6)
{
printf("Usage: %s <config_file> %s " \
"<file_id> " \
"<op_flag> <metadata_list>\n" \
"\top_flag: %c for overwrite, " \
"%c for merge\n" \
"\tmetadata_list: name1=value1," \
"name2=value2,...\n", \
argv[0], operation, \
STORAGE_SET_METADATA_FLAG_OVERWRITE, \
STORAGE_SET_METADATA_FLAG_MERGE);
fdfs_client_destroy();
return EINVAL;
}
meta_buff = strdup(argv[5]);
if (meta_buff == NULL)
{
printf("Out of memory!\n");
return ENOMEM;
}
pMetaList = fdfs_split_metadata_ex(meta_buff, \
',', '=', &meta_count, &result);
if (pMetaList == NULL)
{
printf("Out of memory!\n");
free(meta_buff);
return ENOMEM;
}
if ((result=storage_set_metadata1(pTrackerServer, \
NULL, file_id, \
pMetaList, meta_count, *argv[4])) == 0)
{
printf("set meta data success\n");
}
else
{
printf("setmeta fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
free(meta_buff);
free(pMetaList);
}
else if(strcmp(operation, "delete") == 0)
{
if ((result=storage_delete_file1(pTrackerServer, \
NULL, file_id)) == 0)
{
printf("delete file success\n");
}
else
{
printf("delete file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
}
else
{
fdfs_client_destroy();
printf("invalid operation: %s\n", operation);
return EINVAL;
}
/* for test only */
if ((result=fdfs_active_test(pTrackerServer)) != 0)
{
printf("active_test to tracker server %s:%d fail, errno: %d\n", \
pTrackerServer->ip_addr, pTrackerServer->port, result);
}
/* for test only */
if ((result=fdfs_active_test(pStorageServer)) != 0)
{
printf("active_test to storage server %s:%d fail, errno: %d\n", \
pStorageServer->ip_addr, pStorageServer->port, result);
}
tracker_close_connection_ex(pStorageServer, true);
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}
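/*
 * Example invocations of this test program (illustrative only; the binary
 * name, config path and file id below are placeholders, assuming the program
 * is built as fdfs_test1 and a client config exists at /etc/fdfs/client.conf):
 *
 *   ./fdfs_test1 /etc/fdfs/client.conf upload /tmp/test.jpg FILE
 *   ./fdfs_test1 /etc/fdfs/client.conf download group1/M00/00/00/xxx.jpg
 *   ./fdfs_test1 /etc/fdfs/client.conf getmeta group1/M00/00/00/xxx.jpg
 *   ./fdfs_test1 /etc/fdfs/client.conf setmeta group1/M00/00/00/xxx.jpg O width=1024,height=768
 *   ./fdfs_test1 /etc/fdfs/client.conf delete group1/M00/00/00/xxx.jpg
 */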

View File

@ -0,0 +1,88 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fastcommon/logger.h"
int main(int argc, char *argv[])
{
char *conf_filename;
char *local_filename;
char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
ConnectionInfo *pTrackerServer;
int result;
int store_path_index;
ConnectionInfo storageServer;
char file_id[128];
if (argc < 3)
{
printf("Usage: %s <config_file> <local_filename>\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_ERR;
ignore_signal_pipe();
conf_filename = argv[1];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
*group_name = '\0';
store_path_index = 0;
if ((result=tracker_query_storage_store(pTrackerServer, \
&storageServer, group_name, &store_path_index)) != 0)
{
fdfs_client_destroy();
fprintf(stderr, "tracker_query_storage fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
return result;
}
local_filename = argv[2];
result = storage_upload_appender_by_filename1(pTrackerServer, \
&storageServer, store_path_index, \
local_filename, NULL, \
NULL, 0, group_name, file_id);
if (result != 0)
{
fprintf(stderr, "upload file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}
printf("%s\n", file_id);
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return 0;
}
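/*
 * Example usage (illustrative; the binary name and paths are assumptions):
 * upload a local file as an appender file and print the resulting file id,
 * which can later be passed to the append/modify/truncate client APIs:
 *
 *   ./fdfs_upload_appender /etc/fdfs/client.conf /tmp/access.log
 */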

View File

@ -0,0 +1,120 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fastcommon/logger.h"
static void usage(char *argv[])
{
printf("Usage: %s <config_file> <local_filename> " \
"[storage_ip:port] [store_path_index]\n", argv[0]);
}
int main(int argc, char *argv[])
{
char *conf_filename;
char *local_filename;
char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
ConnectionInfo *pTrackerServer;
int result;
int store_path_index;
ConnectionInfo storageServer;
char file_id[128];
if (argc < 3)
{
usage(argv);
return 1;
}
log_init();
g_log_context.log_level = LOG_ERR;
ignore_signal_pipe();
conf_filename = argv[1];
if ((result=fdfs_client_init(conf_filename)) != 0) //initialize the client from the client config file
{
return result;
}
pTrackerServer = tracker_get_connection(); //connect to the tracker
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
local_filename = argv[2];
*group_name = '\0';
if (argc >= 4) //allow the user to specify the storage IP and port, and optionally the store_path_index
{
const char *pPort;
const char *pIpAndPort;
pIpAndPort = argv[3];
pPort = strchr(pIpAndPort, ':');
if (pPort == NULL)
{
fdfs_client_destroy();
fprintf(stderr, "invalid storage ip address and " \
"port: %s\n", pIpAndPort);
usage(argv);
return 1;
}
storageServer.sock = -1;
snprintf(storageServer.ip_addr, sizeof(storageServer.ip_addr), \
"%.*s", (int)(pPort - pIpAndPort), pIpAndPort);
storageServer.port = atoi(pPort + 1);
if (argc >= 5)
{
store_path_index = atoi(argv[4]);
}
else
{
store_path_index = -1;
}
}
else if ((result=tracker_query_storage_store(pTrackerServer, \
&storageServer, group_name, &store_path_index)) != 0)
{
fdfs_client_destroy();
fprintf(stderr, "tracker_query_storage fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
return result;
}
result = storage_upload_by_filename1(pTrackerServer, \
&storageServer, store_path_index, \
local_filename, NULL, \
NULL, 0, group_name, file_id);
if (result == 0)
{
printf("%s\n", file_id);
}
else
{
fprintf(stderr, "upload file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}
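/*
 * Example usage (illustrative; the binary name and values are assumptions):
 * either let the tracker choose a storage server, or pin the upload to a
 * specific storage server address and store path index:
 *
 *   ./fdfs_upload_file /etc/fdfs/client.conf /tmp/test.jpg
 *   ./fdfs_upload_file /etc/fdfs/client.conf /tmp/test.jpg 192.168.0.197:23000 0
 */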

File diff suppressed because it is too large

View File

@ -0,0 +1,593 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#ifndef STORAGE_CLIENT_H
#define STORAGE_CLIENT_H
#include "tracker_types.h"
#include "client_func.h"
#define FDFS_DOWNLOAD_TO_BUFF 1
#define FDFS_DOWNLOAD_TO_FILE 2
#define FDFS_DOWNLOAD_TO_CALLBACK 3
#define FDFS_UPLOAD_BY_BUFF 1
#define FDFS_UPLOAD_BY_FILE 2
#define FDFS_UPLOAD_BY_CALLBACK 3
#define FDFS_FILE_ID_SEPERATOR '/'
#define FDFS_FILE_ID_SEPERATE_STR "/"
#ifdef __cplusplus
extern "C" {
#endif
#define storage_upload_by_filename(pTrackerServer, \
pStorageServer, store_path_index, local_filename, \
file_ext_name, meta_list, meta_count, group_name, \
remote_filename) \
storage_upload_by_filename_ex(pTrackerServer, \
pStorageServer, store_path_index, \
STORAGE_PROTO_CMD_UPLOAD_FILE, local_filename, \
file_ext_name, meta_list, meta_count, group_name, \
remote_filename)
#define storage_upload_appender_by_filename(pTrackerServer, \
pStorageServer, store_path_index, local_filename, \
file_ext_name, meta_list, meta_count, group_name, \
remote_filename) \
storage_upload_by_filename_ex(pTrackerServer, \
pStorageServer, store_path_index, \
STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, local_filename, \
file_ext_name, meta_list, meta_count, group_name, \
remote_filename)
/**
* upload file to storage server (by file name)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* store_path_index: the index of path on the storage server
* local_filename: local filename to upload
* cmd: the protocol command
* file_ext_name: file ext name, not include dot(.),
* if NULL, the ext name will be extracted from the local filename
* meta_list: meta info array
* meta_count: meta item count
* group_name: if not empty, specify the group name to store the file in;
return the group name used to store the file
* remote_filename: return the newly created filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_upload_by_filename_ex(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const int store_path_index, \
const char cmd, const char *local_filename, \
const char *file_ext_name, const FDFSMetaData *meta_list, \
const int meta_count, char *group_name, char *remote_filename);
/**
* upload file to storage server (by file buff)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* store_path_index: the index of path on the storage server
* file_buff: file content/buff
* file_size: file size (bytes)
* file_ext_name: file ext name, not include dot(.), can be NULL
* meta_list: meta info array
* meta_count: meta item count
* group_name: if not empty, specify the group name to store the file in;
return the group name used to store the file
* remote_filename: return the newly created filename
* return: 0 success, !=0 fail, return the error code
**/
#define storage_upload_by_filebuff(pTrackerServer, pStorageServer, \
store_path_index, file_buff, \
file_size, file_ext_name, meta_list, meta_count, \
group_name, remote_filename) \
storage_do_upload_file(pTrackerServer, pStorageServer, \
store_path_index, STORAGE_PROTO_CMD_UPLOAD_FILE, \
FDFS_UPLOAD_BY_BUFF, file_buff, NULL, \
file_size, NULL, NULL, file_ext_name, meta_list, meta_count, \
group_name, remote_filename)
#define storage_upload_appender_by_filebuff(pTrackerServer, pStorageServer, \
store_path_index, file_buff, \
file_size, file_ext_name, meta_list, meta_count, \
group_name, remote_filename) \
storage_do_upload_file(pTrackerServer, pStorageServer, \
store_path_index, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, \
FDFS_UPLOAD_BY_BUFF, file_buff, NULL, \
file_size, NULL, NULL, file_ext_name, meta_list, meta_count, \
group_name, remote_filename)
/**
* Upload file callback function prototype
* params:
* arg: callback extra argument
* sock: connected storage socket for sending file content
* return: 0 success, !=0 fail, should return the error code
**/
typedef int (*UploadCallback) (void *arg, const int64_t file_size, int sock);
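/**
 * Example (an illustrative sketch, not part of this header): an UploadCallback
 * that streams the local file named by arg to the storage socket with plain
 * POSIX open/read/send. It assumes <fcntl.h>, <unistd.h>, <sys/socket.h> and
 * <errno.h> are included and keeps error handling minimal:
 *
 *   static int upload_file_callback(void *arg, const int64_t file_size, int sock)
 *   {
 *       const char *path = (const char *)arg;  //local file to send
 *       char buff[64 * 1024];
 *       ssize_t n;
 *       int fd;
 *       if ((fd = open(path, O_RDONLY)) < 0)
 *       {
 *           return errno != 0 ? errno : EIO;
 *       }
 *       while ((n = read(fd, buff, sizeof(buff))) > 0)
 *       {
 *           if (send(sock, buff, n, 0) != n)  //send this chunk to the storage server
 *           {
 *               close(fd);
 *               return errno != 0 ? errno : EIO;
 *           }
 *           //in total, exactly file_size bytes should be written to sock
 *       }
 *       close(fd);
 *       return n < 0 ? (errno != 0 ? errno : EIO) : 0;
 *   }
 **/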
/**
* upload file to storage server (by callback)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* store_path_index: the index of path on the storage server
* callback: callback function to send file content to storage server
* arg: callback extra argument
* file_size: the file size
* file_ext_name: file ext name, not include dot(.), can be NULL
* meta_list: meta info array
* meta_count: meta item count
* group_name: if not empty, specify the group name to store the file in;
return the group name used to store the file
* remote_filename: return the newly created filename
* return: 0 success, !=0 fail, return the error code
**/
#define storage_upload_by_callback(pTrackerServer, pStorageServer, \
store_path_index, callback, arg, file_size, file_ext_name, \
meta_list, meta_count, group_name, remote_filename) \
storage_upload_by_callback_ex(pTrackerServer, pStorageServer, \
store_path_index, STORAGE_PROTO_CMD_UPLOAD_FILE, \
callback, arg, file_size, file_ext_name, meta_list, \
meta_count, group_name, remote_filename)
#define storage_upload_appender_by_callback(pTrackerServer, pStorageServer, \
store_path_index, callback, arg, file_size, file_ext_name, \
meta_list, meta_count, group_name, remote_filename) \
storage_upload_by_callback_ex(pTrackerServer, pStorageServer, \
store_path_index, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, \
callback, arg, file_size, file_ext_name, meta_list, \
meta_count, group_name, remote_filename)
int storage_upload_by_callback_ex(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const int store_path_index, \
const char cmd, UploadCallback callback, void *arg, \
const int64_t file_size, const char *file_ext_name, \
const FDFSMetaData *meta_list, const int meta_count, \
char *group_name, char *remote_filename);
int storage_do_upload_file(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const int store_path_index, \
const char cmd, const int upload_type, const char *file_buff, \
void *arg, const int64_t file_size, const char *master_filename, \
const char *prefix_name, const char *file_ext_name, \
const FDFSMetaData *meta_list, const int meta_count, \
char *group_name, char *remote_filename);
/**
* delete file from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* group_name: the group name of storage server
* filename: filename on storage server
* return: 0 success, !=0 fail, return the error code
**/
int storage_delete_file(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *group_name, const char *filename);
/**
* download file from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* group_name: the group name of storage server
* remote_filename: filename on storage server
* file_buff: return file content/buff, must be freed
* file_size: return file size (bytes)
* return: 0 success, !=0 fail, return the error code
**/
#define storage_download_file(pTrackerServer, pStorageServer, group_name, \
remote_filename, file_buff, file_size) \
storage_do_download_file_ex(pTrackerServer, pStorageServer, \
FDFS_DOWNLOAD_TO_BUFF, group_name, remote_filename, \
0, 0, file_buff, NULL, file_size)
#define storage_download_file_to_buff(pTrackerServer, pStorageServer, \
group_name, remote_filename, file_buff, file_size) \
storage_do_download_file_ex(pTrackerServer, pStorageServer, \
FDFS_DOWNLOAD_TO_BUFF, group_name, remote_filename, \
0, 0, file_buff, NULL, file_size)
#define storage_do_download_file(pTrackerServer, pStorageServer, \
download_type, group_name, remote_filename, \
file_buff, arg, file_size) \
storage_do_download_file_ex(pTrackerServer, pStorageServer, \
download_type, group_name, remote_filename, \
0, 0, file_buff, arg, file_size)
/**
* download file from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* download_type: FDFS_DOWNLOAD_TO_BUFF or FDFS_DOWNLOAD_TO_FILE
* or FDFS_DOWNLOAD_TO_CALLBACK
* group_name: the group name of storage server
* remote_filename: filename on storage server
* file_offset: the start offset to download
* download_bytes: download bytes, 0 means from start offset to the file end
* file_buff: return file content/buff, must be freed
* arg: additional argument for callback (valid only when download_type
* is FDFS_DOWNLOAD_TO_CALLBACK), can be NULL
* file_size: return file size (bytes)
* return: 0 success, !=0 fail, return the error code
**/
int storage_do_download_file_ex(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const int download_type, \
const char *group_name, const char *remote_filename, \
const int64_t file_offset, const int64_t download_bytes, \
char **file_buff, void *arg, int64_t *file_size);
/**
* download file from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* group_name: the group name of storage server
* remote_filename: filename on storage server
* local_filename: local filename to write
* file_size: return file size (bytes)
* return: 0 success, !=0 fail, return the error code
**/
int storage_download_file_to_file(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *group_name, const char *remote_filename, \
const char *local_filename, int64_t *file_size);
/**
* Download file callback function prototype
* params:
* arg: callback extra argument
* file_size: file size
* data: temporary buffer, should not be kept persistently
* current_size: current data size
* return: 0 success, !=0 fail, should return the error code
**/
typedef int (*DownloadCallback) (void *arg, const int64_t file_size, \
const char *data, const int current_size);
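/**
 * Example (an illustrative sketch, similar in spirit to the writeToFileCallback
 * used by the test programs): a DownloadCallback that appends each received
 * chunk to a FILE * passed through arg; assumes <stdio.h> and <errno.h>:
 *
 *   static int download_to_file_callback(void *arg, const int64_t file_size,
 *           const char *data, const int current_size)
 *   {
 *       FILE *fp = (FILE *)arg;  //opened by the caller, e.g. fopen(..., "wb")
 *       if (fwrite(data, 1, current_size, fp) != (size_t)current_size)
 *       {
 *           return errno != 0 ? errno : EIO;
 *       }
 *       return 0;  //0 tells the client library to keep receiving
 *   }
 **/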
/**
* download file from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* group_name: the group name of storage server
* remote_filename: filename on storage server
* file_offset: the start offset to download
* download_bytes: download bytes, 0 means from start offset to the file end
* callback: callback function
* arg: callback extra argument
* file_size: return file size (bytes)
* return: 0 success, !=0 fail, return the error code
**/
int storage_download_file_ex(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *group_name, const char *remote_filename, \
const int64_t file_offset, const int64_t download_bytes, \
DownloadCallback callback, void *arg, int64_t *file_size);
/**
* set metadata items to storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* group_name: the group name of storage server
* filename: filename on storage server
* meta_list: meta item array
* meta_count: meta item count
* op_flag:
* # STORAGE_SET_METADATA_FLAG_OVERWRITE('O'): overwrite all old
* metadata items
* # STORAGE_SET_METADATA_FLAG_MERGE ('M'): merge, insert when
* the metadata item does not exist, otherwise update it
* return: 0 success, !=0 fail, return the error code
**/
int storage_set_metadata(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *group_name, const char *filename, \
const FDFSMetaData *meta_list, const int meta_count, \
const char op_flag);
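/**
 * Example (an illustrative sketch; the group name and filename below are
 * placeholders): overwrite the metadata of a stored file with two items:
 *
 *   FDFSMetaData meta_list[2];
 *   int result;
 *   strcpy(meta_list[0].name,  "width");
 *   strcpy(meta_list[0].value, "1024");
 *   strcpy(meta_list[1].name,  "height");
 *   strcpy(meta_list[1].value, "768");
 *   result = storage_set_metadata(pTrackerServer, pStorageServer,
 *           "group1", "M00/00/00/xxx.jpg", meta_list, 2,
 *           STORAGE_SET_METADATA_FLAG_OVERWRITE);
 **/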
/**
* get all metadata items from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* group_name: the group name of storage server
* filename: filename on storage server
* meta_list: return meta info array, must be freed
* meta_count: return meta item count
* return: 0 success, !=0 fail, return the error code
**/
int storage_get_metadata(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *group_name, const char *filename, \
FDFSMetaData **meta_list, \
int *meta_count);
/**
* upload slave file to storage server (by file name)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* local_filename: local filename to upload
* master_filename: the master filename to generate the slave file id
* prefix_name: the prefix name to generate the slave file id
* file_ext_name: file ext name, not include dot(.),
* if NULL, the ext name will be extracted from the local filename
* meta_list: meta info array
* meta_count: meta item count
* group_name: specify the group name;
return the group name used to store the file
* remote_filename: return the newly created filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_upload_slave_by_filename(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *local_filename,\
const char *master_filename, const char *prefix_name, \
const char *file_ext_name, \
const FDFSMetaData *meta_list, const int meta_count, \
char *group_name, char *remote_filename);
/**
* upload slave file to storage server (by file buff)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_buff: file content/buff
* file_size: file size (bytes)
* master_filename: the master filename to generate the slave file id
* prefix_name: the prefix name to generate the slave file id
* file_ext_name: file ext name, not include dot(.), can be NULL
* meta_list: meta info array
* meta_count: meta item count
* group_name: specify the group name;
return the group name used to store the file
* remote_filename: return the newly created filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_upload_slave_by_filebuff(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *file_buff, \
const int64_t file_size, const char *master_filename, \
const char *prefix_name, const char *file_ext_name, \
const FDFSMetaData *meta_list, const int meta_count, \
char *group_name, char *remote_filename);
/**
* upload slave file to storage server (by callback)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* callback: callback function to send file content to storage server
* arg: callback extra argument
* file_size: the file size
* master_filename: the master filename to generate the slave file id
* prefix_name: the prefix name to generate the slave file id
* file_ext_name: file ext name, not include dot(.), can be NULL
* meta_list: meta info array
* meta_count: meta item count
* group_name: specify the group name;
return the group name used to store the file
* remote_filename: return the newly created filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_upload_slave_by_callback(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
UploadCallback callback, void *arg, \
const int64_t file_size, const char *master_filename, \
const char *prefix_name, const char *file_ext_name, \
const FDFSMetaData *meta_list, const int meta_count, \
char *group_name, char *remote_filename);
/**
* append file to storage server (by local filename)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* local_filename: local filename to upload
* group_name: the group name
* appender_filename: the appender filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_append_by_filename(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *local_filename,\
const char *group_name, const char *appender_filename);
/**
* append file to storage server (by callback)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* callback: callback function to send file content to storage server
* arg: callback extra argument
* file_size: the file size
* group_name: the group name
* appender_filename: the appender filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_append_by_callback(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
UploadCallback callback, void *arg, const int64_t file_size, \
const char *group_name, const char *appender_filename);
/**
* append file to storage server (by file buff)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_buff: file content/buff
* file_size: file size (bytes)
* group_name: the group name
* appender_filename: the appender filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_append_by_filebuff(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *file_buff, \
const int64_t file_size, const char *group_name, \
const char *appender_filename);
/**
* modify file to storage server (by local filename)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* local_filename: local filename to upload
* file_offset: the start offset to modify appender file
* group_name: the group name
* appender_filename: the appender filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_modify_by_filename(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *local_filename,\
const int64_t file_offset, const char *group_name, \
const char *appender_filename);
/**
* modify file to storage server (by callback)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* callback: callback function to send file content to storage server
* arg: callback extra argument
* file_offset: the start offset to modify appender file
* file_size: the file size
* group_name: the group name
* appender_filename: the appender filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_modify_by_callback(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
UploadCallback callback, void *arg, \
const int64_t file_offset, const int64_t file_size, \
const char *group_name, const char *appender_filename);
/**
* modify file to storage server (by file buff)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_buff: file content/buff
* file_offset: the start offset to modify appender file
* file_size: file size (bytes)
* group_name: the group name
* appender_filename: the appender filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_modify_by_filebuff(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *file_buff, \
const int64_t file_offset, const int64_t file_size, \
const char *group_name, const char *appender_filename);
/**
* truncate the file to the specified size
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* group_name: the group name
* appender_filename: the appender filename
* truncated_file_size: truncated file size
* return: 0 success, !=0 fail, return the error code
**/
int storage_truncate_file(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer,
const char *group_name, const char *appender_filename, \
const int64_t truncated_file_size);
#define storage_query_file_info(pTrackerServer, pStorageServer, \
group_name, filename, pFileInfo) \
storage_query_file_info_ex(pTrackerServer, pStorageServer, \
group_name, filename, pFileInfo, false)
/**
* query file info
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* group_name: the group name of storage server
* filename: filename on storage server
* pFileInfo: return the file info (file size and create timestamp)
* bSilence: if this file does not exist, do not log an error on the storage server
* return: 0 success, !=0 fail, return the error code
**/
int storage_query_file_info_ex(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *group_name, const char *filename, \
FDFSFileInfo *pFileInfo, const bool bSilence);
#define fdfs_get_file_info(group_name, remote_filename, pFileInfo) \
fdfs_get_file_info_ex(group_name, remote_filename, true, pFileInfo)
//bh edit
#define fdfs_get_allonline_file_info(group_name, remote_filename, pFileInfo) \
fdfs_get_allonline_file_info_ex(group_name, remote_filename, true, pFileInfo)
//bh edit end
/**
* check if file exist
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* group_name: the group name of storage server
* remote_filename: filename on storage server
* return: 0 file exist, !=0 not exist, return the error code
**/
int storage_file_exist(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *group_name, const char *remote_filename);
/**
* get file info from the filename return by storage server
* params:
* group_name: the group name of storage server
* remote_filename: filename on storage server
* get_from_server: whether to get the slave file info from the storage server
* pFileInfo: return the file info
* return: 0 success, !=0 fail, return the error code
**/
int fdfs_get_file_info_ex(const char *group_name, const char *remote_filename, \
const bool get_from_server, FDFSFileInfo *pFileInfo);
//bh edit
int fdfs_get_allonline_file_info_ex(const char *group_name, const char *remote_filename, \
const bool get_from_server, FDFSFileInfo *pFileInfo);
//bh edit end
/**
* regenerate normal filename for appender file
* Note: the appender file will change to normal file
* params:
* pTrackerServer: the tracker server
* pStorageServer: the storage server
* group_name: the group name
* appender_filename: the appender filename
* new_group_name: return the new group name
* new_remote_filename: return the new filename
* return: 0 success, !=0 fail, return the error code
**/
int storage_regenerate_appender_filename(ConnectionInfo *pTrackerServer,
ConnectionInfo *pStorageServer, const char *group_name,
const char *appender_filename, char *new_group_name,
char *new_remote_filename);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,553 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#ifndef STORAGE_CLIENT1_H
#define STORAGE_CLIENT1_H
#include "tracker_types.h"
#include "storage_client.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* upload file to storage server (by file name)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* store_path_index: the index of path on the storage server
* local_filename: local filename to upload
* file_ext_name: file ext name, not include dot(.),
* if NULL, the ext name will be extracted from the local filename
* meta_list: meta info array
* meta_count: meta item count
* group_name: specify the group name to upload the file to, can be NULL or empty
* file_id: return the newly created file id (including group name and filename)
* return: 0 success, !=0 fail, return the error code
**/
#define storage_upload_by_filename1(pTrackerServer, pStorageServer, \
store_path_index, local_filename, file_ext_name, \
meta_list, meta_count, group_name, file_id) \
storage_upload_by_filename1_ex(pTrackerServer, pStorageServer, \
store_path_index, STORAGE_PROTO_CMD_UPLOAD_FILE, \
local_filename, file_ext_name, meta_list, meta_count, \
group_name, file_id)
#define storage_upload_appender_by_filename1(pTrackerServer, pStorageServer, \
store_path_index, local_filename, file_ext_name, \
meta_list, meta_count, group_name, file_id) \
storage_upload_by_filename1_ex(pTrackerServer, pStorageServer, \
store_path_index, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, \
local_filename, file_ext_name, meta_list, meta_count, \
group_name, file_id)
int storage_upload_by_filename1_ex(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const int store_path_index, \
const char cmd, const char *local_filename, \
const char *file_ext_name, const FDFSMetaData *meta_list, \
const int meta_count, const char *group_name, char *file_id);
/**
* upload file to storage server (by file buff)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* store_path_index: the index of path on the storage server
* file_buff: file content/buff
* file_size: file size (bytes)
* file_ext_name: file ext name, not include dot(.), can be NULL
* meta_list: meta info array
* meta_count: meta item count
* group_name: specify the group name to upload the file to, can be NULL or empty
* file_id: return the newly created file id (including group name and filename)
* return: 0 success, !=0 fail, return the error code
**/
#define storage_upload_by_filebuff1(pTrackerServer, pStorageServer, \
store_path_index, file_buff, file_size, file_ext_name, \
meta_list, meta_count, group_name, file_id) \
storage_do_upload_file1(pTrackerServer, pStorageServer, \
store_path_index, STORAGE_PROTO_CMD_UPLOAD_FILE, \
FDFS_UPLOAD_BY_BUFF, file_buff, NULL, \
file_size, file_ext_name, meta_list, meta_count, \
group_name, file_id)
#define storage_upload_appender_by_filebuff1(pTrackerServer, pStorageServer, \
store_path_index, file_buff, file_size, file_ext_name, \
meta_list, meta_count, group_name, file_id) \
storage_do_upload_file1(pTrackerServer, pStorageServer, \
store_path_index, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, \
FDFS_UPLOAD_BY_BUFF, file_buff, NULL, \
file_size, file_ext_name, meta_list, meta_count, \
group_name, file_id)
int storage_do_upload_file1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const int store_path_index, \
const char cmd, const int upload_type, \
const char *file_buff, void *arg, const int64_t file_size, \
const char *file_ext_name, const FDFSMetaData *meta_list, \
const int meta_count, const char *group_name, char *file_id);
/**
* upload file to storage server (by callback)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* store_path_index: the index of path on the storage server
* file_size: the file size
* file_ext_name: file ext name, not include dot(.), can be NULL
* callback: callback function to send file content to storage server
* arg: callback extra argument
* meta_list: meta info array
* meta_count: meta item count
* group_name: specify the group name to upload the file to, can be NULL or empty
* file_id: return the newly created file id (including group name and filename)
* return: 0 success, !=0 fail, return the error code
**/
#define storage_upload_by_callback1(pTrackerServer, pStorageServer, \
store_path_index, callback, arg, \
file_size, file_ext_name, meta_list, meta_count, \
group_name, file_id) \
storage_upload_by_callback1_ex(pTrackerServer, pStorageServer, \
store_path_index, STORAGE_PROTO_CMD_UPLOAD_FILE, \
callback, arg, file_size, file_ext_name, meta_list, \
meta_count, group_name, file_id)
#define storage_upload_appender_by_callback1(pTrackerServer, pStorageServer, \
store_path_index, callback, arg, \
file_size, file_ext_name, meta_list, meta_count, \
group_name, file_id) \
storage_upload_by_callback1_ex(pTrackerServer, pStorageServer, \
store_path_index, STORAGE_PROTO_CMD_UPLOAD_APPENDER_FILE, \
callback, arg, file_size, file_ext_name, meta_list, \
meta_count, group_name, file_id)
int storage_upload_by_callback1_ex(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const int store_path_index, \
const char cmd, UploadCallback callback, void *arg, \
const int64_t file_size, const char *file_ext_name, \
const FDFSMetaData *meta_list, const int meta_count, \
const char *group_name, char *file_id);
/**
* delete file from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_id: the file id to delete (including group name and filename)
* return: 0 success, !=0 fail, return the error code
**/
int storage_delete_file1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *file_id);
/**
* truncate the appender file to the specified size
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* appender_file_id: the appender file id
* truncated_file_size: the truncated file size
* return: 0 success, !=0 fail, return the error code
**/
int storage_truncate_file1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer,
const char *appender_file_id, \
const int64_t truncated_file_size);
/**
* set metadata items to storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_id: the file id (including group name and filename)
* meta_list: meta item array
* meta_count: meta item count
* op_flag:
* # STORAGE_SET_METADATA_FLAG_OVERWRITE('O'): overwrite all old
* metadata items
* # STORAGE_SET_METADATA_FLAG_MERGE ('M'): merge, insert when
* the metadata item not exist, otherwise update it
* return: 0 success, !=0 fail, return the error code
**/
int storage_set_metadata1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *file_id, \
const FDFSMetaData *meta_list, const int meta_count, \
const char op_flag);
/**
* download file from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_id: the file id (including group name and filename)
* file_buff: return file content/buff, must be freed
* file_size: return file size (bytes)
* return: 0 success, !=0 fail, return the error code
**/
#define storage_download_file1(pTrackerServer, pStorageServer, file_id, \
file_buff, file_size) \
storage_do_download_file1_ex(pTrackerServer, pStorageServer, \
FDFS_DOWNLOAD_TO_BUFF, file_id, 0, 0, \
file_buff, NULL, file_size)
#define storage_download_file_to_buff1(pTrackerServer, pStorageServer, \
file_id, file_buff, file_size) \
storage_do_download_file1_ex(pTrackerServer, pStorageServer, \
FDFS_DOWNLOAD_TO_BUFF, file_id, 0, 0, \
file_buff, NULL, file_size)
#define storage_do_download_file1(pTrackerServer, pStorageServer, \
download_type, file_id, file_buff, file_size) \
storage_do_download_file1_ex(pTrackerServer, pStorageServer, \
download_type, file_id, \
0, 0, file_buff, NULL, file_size)
/**
* download file from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_id: the file id (including group name and filename)
* file_offset: the start offset to download
* download_bytes: download bytes, 0 means from start offset to the file end
* file_buff: return file content/buff, must be freed
* file_size: return file size (bytes)
* return: 0 success, !=0 fail, return the error code
**/
int storage_do_download_file1_ex(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const int download_type, const char *file_id, \
const int64_t file_offset, const int64_t download_bytes, \
char **file_buff, void *arg, int64_t *file_size);
/**
* download file from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_id: the file id (including group name and filename)
* local_filename: local filename to write
* file_size: return file size (bytes)
* return: 0 success, !=0 fail, return the error code
**/
int storage_download_file_to_file1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *file_id, \
const char *local_filename, int64_t *file_size);
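/**
 * Example (an illustrative sketch; the file id and local path below are
 * placeholders): fetch a file by its file id and save it to a local file:
 *
 *   int64_t file_size;
 *   int result;
 *   result = storage_download_file_to_file1(pTrackerServer, pStorageServer,
 *           "group1/M00/00/00/xxx.jpg", "/tmp/xxx.jpg", &file_size);
 **/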
/**
* get all metadata items from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_id: the file id (including group name and filename)
* meta_list: return meta info array, must be freed
* meta_count: return meta item count
* return: 0 success, !=0 fail, return the error code
**/
int storage_get_metadata1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *file_id, \
FDFSMetaData **meta_list, int *meta_count);
/**
* download file from storage server
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_id: the file id (including group name and filename)
* file_offset: the start offset to download
* download_bytes: download bytes, 0 means from start offset to the file end
* callback: callback function
* arg: callback extra argument
* file_size: return file size (bytes)
* return: 0 success, !=0 fail, return the error code
**/
int storage_download_file_ex1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *file_id, \
const int64_t file_offset, const int64_t download_bytes, \
DownloadCallback callback, void *arg, int64_t *file_size);
/**
* query storage server to download file
* params:
* pTrackerServer: tracker server
* pStorageServer: return storage server
* file_id: the file id (including group name and filename)
* return: 0 success, !=0 fail, return the error code
**/
int tracker_query_storage_fetch1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *file_id);
/**
* query storage server to update (delete file and set metadata)
* params:
* pTrackerServer: tracker server
* pStorageServer: return storage server
* file_id: the file id (including group name and filename)
* return: 0 success, !=0 fail, return the error code
**/
int tracker_query_storage_update1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *file_id);
/**
* query storage server list to fetch file
* params:
* pTrackerServer: tracker server
* pStorageServer: return storage server
* nMaxServerCount: max storage server count
* server_count: return storage server count
* file_id: the file id (including group name and filename)
* return: 0 success, !=0 fail, return the error code
**/
int tracker_query_storage_list1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const int nMaxServerCount, \
int *server_count, const char *file_id);
/**
* upload slave file to storage server (by file name)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* local_filename: local filename to upload
* master_file_id: the master file id to generate the slave file id
* prefix_name: the prefix name to generate the file id
* file_ext_name: file ext name, not include dot(.),
* if NULL, the ext name will be extracted from the local filename
* meta_list: meta info array
* meta_count: meta item count
* file_id: return the slave file id
* return: 0 success, !=0 fail, return the error code
**/
int storage_upload_slave_by_filename1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *local_filename,\
const char *master_file_id, const char *prefix_name, \
const char *file_ext_name, \
const FDFSMetaData *meta_list, const int meta_count, \
char *file_id);
/**
* upload slave file to storage server (by file buff)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_buff: file content/buff
* file_size: file size (bytes)
* master_file_id: the master file id to generate the slave file id
* prefix_name: the prefix name to generate the file id
* file_ext_name: file ext name, not include dot(.), can be NULL
* meta_list: meta info array
* meta_count: meta item count
* file_id: return the slave file id
* return: 0 success, !=0 fail, return the error code
**/
int storage_upload_slave_by_filebuff1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *file_buff, \
const int64_t file_size, const char *master_file_id, \
const char *prefix_name, const char *file_ext_name, \
const FDFSMetaData *meta_list, const int meta_count, \
char *file_id);
/**
* upload slave file to storage server (by callback)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* callback: callback function to send file content to storage server
* arg: callback extra argument
* file_size: the file size
* master_file_id: the master file id to generate the slave file id
* prefix_name: the prefix name to generate the file id
* file_ext_name: file ext name, not include dot(.), can be NULL
* meta_list: meta info array
* meta_count: meta item count
* file_id: return the slave file id
* return: 0 success, !=0 fail, return the error code
**/
int storage_upload_slave_by_callback1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
UploadCallback callback, void *arg, \
const int64_t file_size, const char *master_file_id, \
const char *prefix_name, const char *file_ext_name, \
const FDFSMetaData *meta_list, const int meta_count, \
char *file_id);
/**
* append file to storage server (by filename)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* local_filename: local filename to upload
* appender_file_id: the appender file id
* return: 0 success, !=0 fail, return the error code
**/
int storage_append_by_filename1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *local_filename,\
const char *appender_file_id);
/**
* append file to storage server (by file buff)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_buff: file content/buff
* file_size: file size (bytes)
* appender_file_id: the appender file id
* return: 0 success, !=0 fail, return the error code
**/
int storage_append_by_filebuff1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *file_buff, \
const int64_t file_size, const char *appender_file_id);
/**
* append file to storage server (by callback)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* callback: callback function to send file content to storage server
* arg: callback extra argument
* file_size: the file size
* appender_file_id: the appender file id
* return: 0 success, !=0 fail, return the error code
**/
int storage_append_by_callback1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
UploadCallback callback, void *arg, \
const int64_t file_size, const char *appender_file_id);
/**
* modify file to storage server (by local filename)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* local_filename: local filename to upload
* file_offset: the start offset to modify appender file
* appender_file_id: the appender file id
* return: 0 success, !=0 fail, return the error code
**/
int storage_modify_by_filename1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *local_filename,\
const int64_t file_offset, const char *appender_file_id);
/**
* modify file to storage server (by callback)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* callback: callback function to send file content to storage server
* arg: callback extra argument
* file_offset: the start offset to modify appender file
* file_size: the file size
* appender_file_id: the appender file id
* return: 0 success, !=0 fail, return the error code
**/
int storage_modify_by_callback1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
UploadCallback callback, void *arg, \
const int64_t file_offset, const int64_t file_size, \
const char *appender_file_id);
/**
* modify file to storage server (by file buff)
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_buff: file content/buff
* file_offset: the start offset to modify appender file
* file_size: file size (bytes)
* appender_file_id: the appender file id
* return: 0 success, !=0 fail, return the error code
**/
int storage_modify_by_filebuff1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *file_buff, \
const int64_t file_offset, const int64_t file_size, \
const char *appender_file_id);
#define storage_query_file_info1(pTrackerServer, pStorageServer, file_id, \
pFileInfo) \
storage_query_file_info_ex1(pTrackerServer, pStorageServer, file_id, \
pFileInfo, false)
/**
* query file info
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_id: the file id
* pFileInfo: return the file info (file size and create timestamp)
* bSilence: if this file does not exist, do not log an error on the storage server
* return: 0 success, !=0 fail, return the error code
**/
int storage_query_file_info_ex1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const char *file_id, \
FDFSFileInfo *pFileInfo, const bool bSilence);
#define fdfs_get_file_info1(file_id, pFileInfo) \
fdfs_get_file_info_ex1(file_id, true, pFileInfo)
/**
* get file info from the filename return by storage server
* params:
* file_id: the file id return by storage server
* get_from_server: whether to get the slave file info from the storage server
* pFileInfo: return the file info
* return: 0 success, !=0 fail, return the error code
**/
int fdfs_get_file_info_ex1(const char *file_id, const bool get_from_server, \
FDFSFileInfo *pFileInfo);
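/**
 * Example (an illustrative sketch): resolve a file id into its file info and
 * print a few of the returned fields, much as the test program does after an
 * upload (assumes <inttypes.h> for PRId64):
 *
 *   FDFSFileInfo file_info;
 *   if (fdfs_get_file_info1(file_id, &file_info) == 0)
 *   {
 *       printf("size=%"PRId64", crc32=%u, source ip=%s\n",
 *               file_info.file_size, file_info.crc32,
 *               file_info.source_ip_addr);
 *   }
 **/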
//bh edit
int fdfs_get_allonline_file_info_ex1(const char *file_id, const bool get_from_server, \
FDFSFileInfo *pFileInfo);
//bh edit end
/**
* check if file exist
* params:
* pTrackerServer: tracker server
* pStorageServer: storage server
* file_id: the file id return by storage server
* return: 0 file exist, !=0 not exist, return the error code
**/
int storage_file_exist1(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, \
const char *file_id);
/**
* regenerate normal filename for appender file
* Note: the appender file will change to normal file
* params:
* pTrackerServer: the tracker server
* pStorageServer: the storage server
* appender_file_id: the appender file id
* new_file_id: return the regenerated file id (including group name and filename)
* return: 0 success, !=0 fail, return the error code
**/
int storage_regenerate_appender_filename1(ConnectionInfo *pTrackerServer,
ConnectionInfo *pStorageServer, const char *appender_file_id,
char *new_file_id);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,24 @@
.SUFFIXES: .c .o
COMPILE = $(CC) $(CFLAGS)
INC_PATH = -I/usr/include/fastcommon -I/usr/include/fastdfs \
-I/usr/local/include/fastcommon -I/usr/local/include/fastdfs
LIB_PATH = -L/usr/local/lib -lfastcommon -lfdfsclient $(LIBS)
# install destination for `make install`; override on the command line if needed
TARGET_PATH ?= /usr/local/bin
ALL_OBJS =
ALL_PRGS = fdfs_monitor fdfs_test fdfs_test1
all: $(ALL_OBJS) $(ALL_PRGS)
.o:
$(COMPILE) -o $@ $< $(SHARED_OBJS) $(LIB_PATH) $(INC_PATH)
.c:
$(COMPILE) -o $@ $< $(ALL_OBJS) $(LIB_PATH) $(INC_PATH)
.c.o:
$(COMPILE) -c -o $@ $< $(INC_PATH)
install:
cp -f $(ALL_PRGS) $(TARGET_PATH)
clean:
rm -f $(ALL_OBJS) $(ALL_PRGS)
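# Example build and install (assumed environment: libfastcommon and the
# FastDFS client library are already installed under /usr or /usr/local):
#   make
#   make install TARGET_PATH=/usr/local/bin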

View File

@ -0,0 +1,596 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <signal.h>
#include <netdb.h>
#include <sys/types.h>
#include "fastcommon/sockopt.h"
#include "fastcommon/logger.h"
#include "client_global.h"
#include "fdfs_global.h"
#include "fdfs_client.h"
static ConnectionInfo *pTrackerServer;
static int list_all_groups(const char *group_name);
static void usage(char *argv[])
{
printf("Usage: %s <config_file> [-h <tracker_server>] "
"[list|delete|set_trunk_server <group_name> [storage_id]]\n"
"\tthe tracker server format: host[:port], "
"the tracker default port is %d\n\n",
argv[0], FDFS_TRACKER_SERVER_DEF_PORT);
}
int main(int argc, char *argv[])
{
char *conf_filename;
int result;
char *op_type;
char *tracker_server;
int arg_index;
char *group_name;
if (argc < 2)
{
usage(argv);
return 1;
}
tracker_server = NULL;
conf_filename = argv[1];
arg_index = 2;
if (arg_index >= argc)
{
op_type = "list";
}
else
{
int len;
len = strlen(argv[arg_index]);
if (len >= 2 && strncmp(argv[arg_index], "-h", 2) == 0)
{
if (len == 2)
{
arg_index++;
if (arg_index >= argc)
{
usage(argv);
return 1;
}
tracker_server = argv[arg_index++];
}
else
{
tracker_server = argv[arg_index] + 2;
arg_index++;
}
if (arg_index < argc)
{
op_type = argv[arg_index++];
}
else
{
op_type = "list";
}
}
else
{
op_type = argv[arg_index++];
}
}
log_init();
g_log_context.log_level = LOG_DEBUG;
ignore_signal_pipe();
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
load_log_level_ex(conf_filename);
if (tracker_server == NULL)
{
if (g_tracker_group.server_count > 1)
{
srand(time(NULL));
rand(); //discard the first
g_tracker_group.server_index = (int)( \
(g_tracker_group.server_count * (double)rand()) \
/ (double)RAND_MAX);
}
}
else
{
int i;
ConnectionInfo conn;
if ((result=conn_pool_parse_server_info(tracker_server, &conn,
FDFS_TRACKER_SERVER_DEF_PORT)) != 0)
{
printf("resolve ip address of tracker server: %s "
"fail!, error info: %s\n", tracker_server, hstrerror(h_errno));
return result;
}
for (i=0; i<g_tracker_group.server_count; i++)
{
if (fdfs_server_contain1(g_tracker_group.servers + i, &conn))
{
g_tracker_group.server_index = i;
break;
}
}
if (i == g_tracker_group.server_count)
{
printf("tracker server: %s not exists!\n", tracker_server);
return 2;
}
}
printf("server_count=%d, server_index=%d\n",
g_tracker_group.server_count, g_tracker_group.server_index);
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
printf("\ntracker server is %s:%d\n\n", pTrackerServer->ip_addr, pTrackerServer->port);
if (arg_index < argc)
{
group_name = argv[arg_index++];
}
else
{
group_name = NULL;
}
if (strcmp(op_type, "list") == 0)
{
if (group_name == NULL)
{
result = list_all_groups(NULL);
}
else
{
result = list_all_groups(group_name);
}
}
else if (strcmp(op_type, "delete") == 0)
{
if (arg_index >= argc)
{
if ((result=tracker_delete_group(&g_tracker_group, \
group_name)) == 0)
{
printf("delete group: %s success\n", \
group_name);
}
else
{
printf("delete group: %s fail, " \
"error no: %d, error info: %s\n", \
group_name, result, STRERROR(result));
}
}
else
{
char *storage_id;
storage_id = argv[arg_index++];
if ((result=tracker_delete_storage(&g_tracker_group, \
group_name, storage_id)) == 0)
{
printf("delete storage server %s::%s success\n", \
group_name, storage_id);
}
else
{
printf("delete storage server %s::%s fail, " \
"error no: %d, error info: %s\n", \
group_name, storage_id, \
result, STRERROR(result));
}
}
}
else if (strcmp(op_type, "set_trunk_server") == 0)
{
char *storage_id;
char new_trunk_server_id[FDFS_STORAGE_ID_MAX_SIZE];
if (group_name == NULL)
{
usage(argv);
return 1;
}
if (arg_index >= argc)
{
storage_id = "";
}
else
{
storage_id = argv[arg_index++];
}
if ((result=tracker_set_trunk_server(&g_tracker_group, \
group_name, storage_id, new_trunk_server_id)) == 0)
{
printf("set trunk server %s::%s success, " \
"new trunk server: %s\n", group_name, \
storage_id, new_trunk_server_id);
}
else
{
printf("set trunk server %s::%s fail, " \
"error no: %d, error info: %s\n", \
group_name, storage_id, \
result, STRERROR(result));
}
}
else
{
printf("Invalid command %s\n\n", op_type);
usage(argv);
}
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return 0;
}
static int list_storages(FDFSGroupStat *pGroupStat)
{
int result;
int storage_count;
FDFSStorageInfo storage_infos[FDFS_MAX_SERVERS_EACH_GROUP];
FDFSStorageInfo *p;
FDFSStorageInfo *pStorage;
FDFSStorageInfo *pStorageEnd;
FDFSStorageStat *pStorageStat;
char szJoinTime[32];
char szUpTime[32];
char szLastHeartBeatTime[32];
char szSrcUpdTime[32];
char szSyncUpdTime[32];
char szSyncedTimestamp[32];
char szSyncedDelaySeconds[128];
char szHostname[128];
char szHostnamePrompt[128+8];
int k;
int max_last_source_update;
printf( "group name = %s\n" \
"disk total space = %"PRId64" MB\n" \
"disk free space = %"PRId64" MB\n" \
"trunk free space = %"PRId64" MB\n" \
"storage server count = %d\n" \
"active server count = %d\n" \
"storage server port = %d\n" \
"storage HTTP port = %d\n" \
"store path count = %d\n" \
"subdir count per path = %d\n" \
"current write server index = %d\n" \
"current trunk file id = %d\n\n", \
pGroupStat->group_name, \
pGroupStat->total_mb, \
pGroupStat->free_mb, \
pGroupStat->trunk_free_mb, \
pGroupStat->count, \
pGroupStat->active_count, \
pGroupStat->storage_port, \
pGroupStat->storage_http_port, \
pGroupStat->store_path_count, \
pGroupStat->subdir_count_per_path, \
pGroupStat->current_write_server, \
pGroupStat->current_trunk_file_id
);
result = tracker_list_servers(pTrackerServer, \
pGroupStat->group_name, NULL, \
storage_infos, FDFS_MAX_SERVERS_EACH_GROUP, \
&storage_count);
if (result != 0)
{
return result;
}
k = 0;
pStorageEnd = storage_infos + storage_count;
for (pStorage=storage_infos; pStorage<pStorageEnd; \
pStorage++)
{
max_last_source_update = 0;
for (p=storage_infos; p<pStorageEnd; p++)
{
if (p != pStorage && p->stat.last_source_update
> max_last_source_update)
{
max_last_source_update = \
p->stat.last_source_update;
}
}
pStorageStat = &(pStorage->stat);
if (max_last_source_update == 0)
{
*szSyncedDelaySeconds = '\0';
}
else
{
if (pStorageStat->last_synced_timestamp == 0)
{
strcpy(szSyncedDelaySeconds, "(never synced)");
}
else
{
int delay_seconds;
int remain_seconds;
int day;
int hour;
int minute;
int second;
char szDelayTime[64];
delay_seconds = (int)(max_last_source_update -
pStorageStat->last_synced_timestamp);
if (delay_seconds < 0)
{
delay_seconds = 0;
}
day = delay_seconds / (24 * 3600);
remain_seconds = delay_seconds % (24 * 3600);
hour = remain_seconds / 3600;
remain_seconds %= 3600;
minute = remain_seconds / 60;
second = remain_seconds % 60;
if (day != 0)
{
sprintf(szDelayTime, "%d days " \
"%02dh:%02dm:%02ds", \
day, hour, minute, second);
}
else if (hour != 0)
{
sprintf(szDelayTime, "%02dh:%02dm:%02ds", \
hour, minute, second);
}
else if (minute != 0)
{
sprintf(szDelayTime, "%02dm:%02ds", minute, second);
}
else
{
sprintf(szDelayTime, "%ds", second);
}
sprintf(szSyncedDelaySeconds, "(%s delay)", szDelayTime);
}
}
getHostnameByIp(pStorage->ip_addr, szHostname, sizeof(szHostname));
if (*szHostname != '\0')
{
sprintf(szHostnamePrompt, " (%s)", szHostname);
}
else
{
*szHostnamePrompt = '\0';
}
if (pStorage->up_time != 0)
{
formatDatetime(pStorage->up_time, \
"%Y-%m-%d %H:%M:%S", \
szUpTime, sizeof(szUpTime));
}
else
{
*szUpTime = '\0';
}
printf( "\tStorage %d:\n" \
"\t\tid = %s\n" \
"\t\tip_addr = %s%s %s\n" \
"\t\thttp domain = %s\n" \
"\t\tversion = %s\n" \
"\t\tjoin time = %s\n" \
"\t\tup time = %s\n" \
"\t\ttotal storage = %d MB\n" \
"\t\tfree storage = %d MB\n" \
"\t\tupload priority = %d\n" \
"\t\tstore_path_count = %d\n" \
"\t\tsubdir_count_per_path = %d\n" \
"\t\tstorage_port = %d\n" \
"\t\tstorage_http_port = %d\n" \
"\t\tcurrent_write_path = %d\n" \
"\t\tsource storage id = %s\n" \
"\t\tif_trunk_server = %d\n" \
"\t\tconnection.alloc_count = %d\n" \
"\t\tconnection.current_count = %d\n" \
"\t\tconnection.max_count = %d\n" \
"\t\ttotal_upload_count = %"PRId64"\n" \
"\t\tsuccess_upload_count = %"PRId64"\n" \
"\t\ttotal_append_count = %"PRId64"\n" \
"\t\tsuccess_append_count = %"PRId64"\n" \
"\t\ttotal_modify_count = %"PRId64"\n" \
"\t\tsuccess_modify_count = %"PRId64"\n" \
"\t\ttotal_truncate_count = %"PRId64"\n" \
"\t\tsuccess_truncate_count = %"PRId64"\n" \
"\t\ttotal_set_meta_count = %"PRId64"\n" \
"\t\tsuccess_set_meta_count = %"PRId64"\n" \
"\t\ttotal_delete_count = %"PRId64"\n" \
"\t\tsuccess_delete_count = %"PRId64"\n" \
"\t\ttotal_download_count = %"PRId64"\n" \
"\t\tsuccess_download_count = %"PRId64"\n" \
"\t\ttotal_get_meta_count = %"PRId64"\n" \
"\t\tsuccess_get_meta_count = %"PRId64"\n" \
"\t\ttotal_create_link_count = %"PRId64"\n" \
"\t\tsuccess_create_link_count = %"PRId64"\n"\
"\t\ttotal_delete_link_count = %"PRId64"\n" \
"\t\tsuccess_delete_link_count = %"PRId64"\n" \
"\t\ttotal_upload_bytes = %"PRId64"\n" \
"\t\tsuccess_upload_bytes = %"PRId64"\n" \
"\t\ttotal_append_bytes = %"PRId64"\n" \
"\t\tsuccess_append_bytes = %"PRId64"\n" \
"\t\ttotal_modify_bytes = %"PRId64"\n" \
"\t\tsuccess_modify_bytes = %"PRId64"\n" \
"\t\tstotal_download_bytes = %"PRId64"\n" \
"\t\tsuccess_download_bytes = %"PRId64"\n" \
"\t\ttotal_sync_in_bytes = %"PRId64"\n" \
"\t\tsuccess_sync_in_bytes = %"PRId64"\n" \
"\t\ttotal_sync_out_bytes = %"PRId64"\n" \
"\t\tsuccess_sync_out_bytes = %"PRId64"\n" \
"\t\ttotal_file_open_count = %"PRId64"\n" \
"\t\tsuccess_file_open_count = %"PRId64"\n" \
"\t\ttotal_file_read_count = %"PRId64"\n" \
"\t\tsuccess_file_read_count = %"PRId64"\n" \
"\t\ttotal_file_write_count = %"PRId64"\n" \
"\t\tsuccess_file_write_count = %"PRId64"\n" \
"\t\tlast_heart_beat_time = %s\n" \
"\t\tlast_source_update = %s\n" \
"\t\tlast_sync_update = %s\n" \
"\t\tlast_synced_timestamp = %s %s\n", \
++k, pStorage->id, pStorage->ip_addr, \
szHostnamePrompt, get_storage_status_caption( \
pStorage->status), pStorage->domain_name, \
pStorage->version, \
formatDatetime(pStorage->join_time, \
"%Y-%m-%d %H:%M:%S", \
szJoinTime, sizeof(szJoinTime)), \
szUpTime, pStorage->total_mb, \
pStorage->free_mb, \
pStorage->upload_priority, \
pStorage->store_path_count, \
pStorage->subdir_count_per_path, \
pStorage->storage_port, \
pStorage->storage_http_port, \
pStorage->current_write_path, \
pStorage->src_id, \
pStorage->if_trunk_server, \
pStorageStat->connection.alloc_count, \
pStorageStat->connection.current_count, \
pStorageStat->connection.max_count, \
pStorageStat->total_upload_count, \
pStorageStat->success_upload_count, \
pStorageStat->total_append_count, \
pStorageStat->success_append_count, \
pStorageStat->total_modify_count, \
pStorageStat->success_modify_count, \
pStorageStat->total_truncate_count, \
pStorageStat->success_truncate_count, \
pStorageStat->total_set_meta_count, \
pStorageStat->success_set_meta_count, \
pStorageStat->total_delete_count, \
pStorageStat->success_delete_count, \
pStorageStat->total_download_count, \
pStorageStat->success_download_count, \
pStorageStat->total_get_meta_count, \
pStorageStat->success_get_meta_count, \
pStorageStat->total_create_link_count, \
pStorageStat->success_create_link_count, \
pStorageStat->total_delete_link_count, \
pStorageStat->success_delete_link_count, \
pStorageStat->total_upload_bytes, \
pStorageStat->success_upload_bytes, \
pStorageStat->total_append_bytes, \
pStorageStat->success_append_bytes, \
pStorageStat->total_modify_bytes, \
pStorageStat->success_modify_bytes, \
pStorageStat->total_download_bytes, \
pStorageStat->success_download_bytes, \
pStorageStat->total_sync_in_bytes, \
pStorageStat->success_sync_in_bytes, \
pStorageStat->total_sync_out_bytes, \
pStorageStat->success_sync_out_bytes, \
pStorageStat->total_file_open_count, \
pStorageStat->success_file_open_count, \
pStorageStat->total_file_read_count, \
pStorageStat->success_file_read_count, \
pStorageStat->total_file_write_count, \
pStorageStat->success_file_write_count, \
formatDatetime(pStorageStat->last_heart_beat_time, \
"%Y-%m-%d %H:%M:%S", \
szLastHeartBeatTime, sizeof(szLastHeartBeatTime)), \
formatDatetime(pStorageStat->last_source_update, \
"%Y-%m-%d %H:%M:%S", \
szSrcUpdTime, sizeof(szSrcUpdTime)), \
formatDatetime(pStorageStat->last_sync_update, \
"%Y-%m-%d %H:%M:%S", \
szSyncUpdTime, sizeof(szSyncUpdTime)), \
formatDatetime(pStorageStat->last_synced_timestamp, \
"%Y-%m-%d %H:%M:%S", \
szSyncedTimestamp, sizeof(szSyncedTimestamp)),\
szSyncedDelaySeconds);
}
return 0;
}
static int list_all_groups(const char *group_name)
{
int result;
int group_count;
FDFSGroupStat group_stats[FDFS_MAX_GROUPS];
FDFSGroupStat *pGroupStat;
FDFSGroupStat *pGroupEnd;
int i;
result = tracker_list_groups(pTrackerServer, \
group_stats, FDFS_MAX_GROUPS, \
&group_count);
if (result != 0)
{
tracker_close_all_connections();
fdfs_client_destroy();
return result;
}
pGroupEnd = group_stats + group_count;
if (group_name == NULL)
{
printf("group count: %d\n", group_count);
i = 0;
for (pGroupStat=group_stats; pGroupStat<pGroupEnd; \
pGroupStat++)
{
printf( "\nGroup %d:\n", ++i);
list_storages(pGroupStat);
}
}
else
{
for (pGroupStat=group_stats; pGroupStat<pGroupEnd; \
pGroupStat++)
{
if (strcmp(pGroupStat->group_name, group_name) == 0)
{
list_storages(pGroupStat);
break;
}
}
}
return 0;
}

View File

@ -0,0 +1,691 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fdfs_global.h"
#include "fastcommon/base64.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/logger.h"
#include "fdfs_http_shared.h"
int writeToFileCallback(void *arg, const int64_t file_size, const char *data, \
const int current_size)
{
if (arg == NULL)
{
return EINVAL;
}
if (fwrite(data, current_size, 1, (FILE *)arg) != 1)
{
return errno != 0 ? errno : EIO;
}
return 0;
}
int uploadFileCallback(void *arg, const int64_t file_size, int sock)
{
int64_t total_send_bytes;
char *filename;
if (arg == NULL)
{
return EINVAL;
}
filename = (char *)arg;
return tcpsendfile(sock, filename, file_size, \
g_fdfs_network_timeout, &total_send_bytes);
}
int main(int argc, char *argv[])
{
char *conf_filename;
char *local_filename;
ConnectionInfo *pTrackerServer;
ConnectionInfo *pStorageServer;
int result;
ConnectionInfo storageServer;
char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
char remote_filename[256];
char master_filename[256];
FDFSMetaData meta_list[32];
int meta_count;
int i;
FDFSMetaData *pMetaList;
char token[32 + 1];
char file_id[128];
char file_url[256];
char szDatetime[20];
char szPortPart[16];
int url_len;
time_t ts;
char *file_buff;
int64_t file_size;
char *operation;
char *meta_buff;
int store_path_index;
FDFSFileInfo file_info;
printf("This is FastDFS client test program v%d.%02d\n" \
"\nCopyright (C) 2008, Happy Fish / YuQing\n" \
"\nFastDFS may be copied only under the terms of the GNU General\n" \
"Public License V3, which may be found in the FastDFS source kit.\n" \
"Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
"for more detail.\n\n" \
, g_fdfs_version.major, g_fdfs_version.minor);
if (argc < 3)
{
printf("Usage: %s <config_file> <operation>\n" \
"\toperation: upload, download, getmeta, setmeta, " \
"delete and query_servers\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_DEBUG;
conf_filename = argv[1];
operation = argv[2];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
pStorageServer = NULL;
*group_name = '\0';
local_filename = NULL;
if (strcmp(operation, "upload") == 0)
{
int upload_type;
char *prefix_name;
const char *file_ext_name;
char slave_filename[256];
int slave_filename_len;
if (argc < 4)
{
printf("Usage: %s <config_file> upload " \
"<local_filename> [FILE | BUFF | CALLBACK] \n",\
argv[0]);
fdfs_client_destroy();
return EINVAL;
}
local_filename = argv[3];
if (argc == 4)
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
else
{
if (strcmp(argv[4], "BUFF") == 0)
{
upload_type = FDFS_UPLOAD_BY_BUFF;
}
else if (strcmp(argv[4], "CALLBACK") == 0)
{
upload_type = FDFS_UPLOAD_BY_CALLBACK;
}
else
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
}
store_path_index = 0;
{
ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
ConnectionInfo *pServer;
ConnectionInfo *pServerEnd;
int storage_count;
if ((result=tracker_query_storage_store_list_without_group( \
pTrackerServer, storageServers, \
FDFS_MAX_SERVERS_EACH_GROUP, &storage_count, \
group_name, &store_path_index)) == 0)
{
printf("tracker_query_storage_store_list_without_group: \n");
pServerEnd = storageServers + storage_count;
for (pServer=storageServers; pServer<pServerEnd; pServer++)
{
printf("\tserver %d. group_name=%s, " \
"ip_addr=%s, port=%d\n", \
(int)(pServer - storageServers) + 1, \
group_name, pServer->ip_addr, pServer->port);
}
printf("\n");
}
}
if ((result=tracker_query_storage_store(pTrackerServer, \
&storageServer, group_name, &store_path_index)) != 0)
{
fdfs_client_destroy();
printf("tracker_query_storage fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
return result;
}
printf("group_name=%s, ip_addr=%s, port=%d\n", \
group_name, storageServer.ip_addr, \
storageServer.port);
if ((pStorageServer=tracker_make_connection(&storageServer, \
&result)) == NULL)
{
fdfs_client_destroy();
return result;
}
memset(&meta_list, 0, sizeof(meta_list));
meta_count = 0;
strcpy(meta_list[meta_count].name, "ext_name");
strcpy(meta_list[meta_count].value, "jpg");
meta_count++;
strcpy(meta_list[meta_count].name, "width");
strcpy(meta_list[meta_count].value, "160");
meta_count++;
strcpy(meta_list[meta_count].name, "height");
strcpy(meta_list[meta_count].value, "80");
meta_count++;
strcpy(meta_list[meta_count].name, "file_size");
strcpy(meta_list[meta_count].value, "115120");
meta_count++;
file_ext_name = fdfs_get_file_ext_name(local_filename);
*group_name = '\0';
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
result = storage_upload_by_filename(pTrackerServer, \
pStorageServer, store_path_index, \
local_filename, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
printf("storage_upload_by_filename\n");
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_upload_by_filebuff(pTrackerServer, \
pStorageServer, store_path_index, \
file_content, file_size, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
free(file_content);
}
printf("storage_upload_by_filebuff\n");
}
else
{
struct stat stat_buf;
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_by_callback(pTrackerServer, \
pStorageServer, store_path_index, \
uploadFileCallback, local_filename, \
file_size, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
}
printf("storage_upload_by_callback\n");
}
if (result != 0)
{
printf("upload file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
if (g_tracker_server_http_port == 80)
{
*szPortPart = '\0';
}
else
{
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
}
sprintf(file_id, "%s/%s", group_name, remote_filename);
url_len = sprintf(file_url, "http://%s%s/%s", \
pStorageServer->ip_addr, szPortPart, file_id);
if (g_anti_steal_token)
{
ts = time(NULL);
fdfs_http_gen_token(&g_anti_steal_secret_key, file_id, \
ts, token);
sprintf(file_url + url_len, "?token=%s&ts=%d", \
token, (int)ts);
}
printf("group_name=%s, remote_filename=%s\n", \
group_name, remote_filename);
fdfs_get_file_info(group_name, remote_filename, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("example file url: %s\n", file_url);
strcpy(master_filename, remote_filename);
*remote_filename = '\0';
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
prefix_name = "_big";
result = storage_upload_slave_by_filename(pTrackerServer,
NULL, local_filename, master_filename, \
prefix_name, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
printf("storage_upload_slave_by_filename\n");
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
prefix_name = "1024x1024";
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_upload_slave_by_filebuff(pTrackerServer, \
NULL, file_content, file_size, master_filename,
prefix_name, file_ext_name, \
meta_list, meta_count, \
group_name, remote_filename);
free(file_content);
}
printf("storage_upload_slave_by_filebuff\n");
}
else
{
struct stat stat_buf;
prefix_name = "-small";
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_slave_by_callback(pTrackerServer, \
NULL, uploadFileCallback, local_filename, \
file_size, master_filename, prefix_name, \
file_ext_name, meta_list, meta_count, \
group_name, remote_filename);
}
printf("storage_upload_slave_by_callback\n");
}
if (result != 0)
{
printf("upload slave file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
if (g_tracker_server_http_port == 80)
{
*szPortPart = '\0';
}
else
{
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
}
sprintf(file_id, "%s/%s", group_name, remote_filename);
url_len = sprintf(file_url, "http://%s%s/%s", \
pStorageServer->ip_addr, szPortPart, file_id);
if (g_anti_steal_token)
{
ts = time(NULL);
fdfs_http_gen_token(&g_anti_steal_secret_key, file_id, \
ts, token);
sprintf(file_url + url_len, "?token=%s&ts=%d", \
token, (int)ts);
}
printf("group_name=%s, remote_filename=%s\n", \
group_name, remote_filename);
fdfs_get_file_info(group_name, remote_filename, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("example file url: %s\n", file_url);
if (fdfs_gen_slave_filename(master_filename, \
prefix_name, file_ext_name, \
slave_filename, &slave_filename_len) == 0)
{
if (strcmp(remote_filename, slave_filename) != 0)
{
printf("slave_filename=%s\n" \
"remote_filename=%s\n" \
"not equal!\n", \
slave_filename, remote_filename);
}
}
}
else if (strcmp(operation, "download") == 0 ||
strcmp(operation, "getmeta") == 0 ||
strcmp(operation, "setmeta") == 0 ||
strcmp(operation, "query_servers") == 0 ||
strcmp(operation, "delete") == 0)
{
if (argc < 5)
{
printf("Usage: %s <config_file> %s " \
"<group_name> <remote_filename>\n", \
argv[0], operation);
fdfs_client_destroy();
return EINVAL;
}
snprintf(group_name, sizeof(group_name), "%s", argv[3]);
snprintf(remote_filename, sizeof(remote_filename), \
"%s", argv[4]);
if (strcmp(operation, "setmeta") == 0 ||
strcmp(operation, "delete") == 0)
{
result = tracker_query_storage_update(pTrackerServer, \
&storageServer, group_name, remote_filename);
}
else if (strcmp(operation, "query_servers") == 0)
{
ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
int server_count;
result = tracker_query_storage_list(pTrackerServer, \
storageServers, FDFS_MAX_SERVERS_EACH_GROUP, \
&server_count, group_name, remote_filename);
if (result != 0)
{
printf("tracker_query_storage_list fail, "\
"group_name=%s, filename=%s, " \
"error no: %d, error info: %s\n", \
group_name, remote_filename, \
result, STRERROR(result));
}
else
{
printf("server list (%d):\n", server_count);
for (i=0; i<server_count; i++)
{
printf("\t%s:%d\n", \
storageServers[i].ip_addr, \
storageServers[i].port);
}
printf("\n");
}
tracker_close_connection_ex(pTrackerServer, result != 0);
fdfs_client_destroy();
return result;
}
else
{
result = tracker_query_storage_fetch(pTrackerServer, \
&storageServer, group_name, remote_filename);
}
if (result != 0)
{
fdfs_client_destroy();
printf("tracker_query_storage_fetch fail, " \
"group_name=%s, filename=%s, " \
"error no: %d, error info: %s\n", \
group_name, remote_filename, \
result, STRERROR(result));
return result;
}
printf("storage=%s:%d\n", storageServer.ip_addr, \
storageServer.port);
if ((pStorageServer=tracker_make_connection(&storageServer, \
&result)) == NULL)
{
fdfs_client_destroy();
return result;
}
if (strcmp(operation, "download") == 0)
{
if (argc >= 6)
{
local_filename = argv[5];
if (strcmp(local_filename, "CALLBACK") == 0)
{
FILE *fp;
fp = fopen(local_filename, "wb");
if (fp == NULL)
{
result = errno != 0 ? errno : EPERM;
printf("open file \"%s\" fail, " \
"errno: %d, error info: %s", \
local_filename, result, \
STRERROR(result));
}
else
{
result = storage_download_file_ex( \
pTrackerServer, pStorageServer, \
group_name, remote_filename, 0, 0, \
writeToFileCallback, fp, &file_size);
fclose(fp);
}
}
else
{
result = storage_download_file_to_file( \
pTrackerServer, pStorageServer, \
group_name, remote_filename, \
local_filename, &file_size);
}
}
else
{
file_buff = NULL;
if ((result=storage_download_file_to_buff( \
pTrackerServer, pStorageServer, \
group_name, remote_filename, \
&file_buff, &file_size)) == 0)
{
local_filename = strrchr( \
remote_filename, '/');
if (local_filename != NULL)
{
local_filename++; //skip /
}
else
{
local_filename=remote_filename;
}
result = writeToFile(local_filename, \
file_buff, file_size);
free(file_buff);
}
}
if (result == 0)
{
printf("download file success, " \
"file size=%"PRId64", file save to %s\n", \
file_size, local_filename);
}
else
{
printf("download file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
else if (strcmp(operation, "getmeta") == 0)
{
if ((result=storage_get_metadata(pTrackerServer, \
pStorageServer, group_name, remote_filename, \
&pMetaList, &meta_count)) == 0)
{
printf("get meta data success, " \
"meta count=%d\n", meta_count);
for (i=0; i<meta_count; i++)
{
printf("%s=%s\n", \
pMetaList[i].name, \
pMetaList[i].value);
}
free(pMetaList);
}
else
{
printf("getmeta fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
else if (strcmp(operation, "setmeta") == 0)
{
if (argc < 7)
{
printf("Usage: %s <config_file> %s " \
"<group_name> <remote_filename> " \
"<op_flag> <metadata_list>\n" \
"\top_flag: %c for overwrite, " \
"%c for merge\n" \
"\tmetadata_list: name1=value1," \
"name2=value2,...\n", \
argv[0], operation, \
STORAGE_SET_METADATA_FLAG_OVERWRITE, \
STORAGE_SET_METADATA_FLAG_MERGE);
fdfs_client_destroy();
return EINVAL;
}
meta_buff = strdup(argv[6]);
if (meta_buff == NULL)
{
printf("Out of memory!\n");
return ENOMEM;
}
pMetaList = fdfs_split_metadata_ex(meta_buff, \
',', '=', &meta_count, &result);
if (pMetaList == NULL)
{
printf("Out of memory!\n");
free(meta_buff);
return ENOMEM;
}
if ((result=storage_set_metadata(pTrackerServer, \
NULL, group_name, remote_filename, \
pMetaList, meta_count, *argv[5])) == 0)
{
printf("set meta data success\n");
}
else
{
printf("setmeta fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
free(meta_buff);
free(pMetaList);
}
else if(strcmp(operation, "delete") == 0)
{
if ((result=storage_delete_file(pTrackerServer, \
NULL, group_name, remote_filename)) == 0)
{
printf("delete file success\n");
}
else
{
printf("delete file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
}
else
{
fdfs_client_destroy();
printf("invalid operation: %s\n", operation);
return EINVAL;
}
/* for test only */
if ((result=fdfs_active_test(pTrackerServer)) != 0)
{
printf("active_test to tracker server %s:%d fail, errno: %d\n", \
pTrackerServer->ip_addr, pTrackerServer->port, result);
}
/* for test only */
if ((result=fdfs_active_test(pStorageServer)) != 0)
{
printf("active_test to storage server %s:%d fail, errno: %d\n", \
pStorageServer->ip_addr, pStorageServer->port, result);
}
tracker_close_connection_ex(pStorageServer, true);
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}

View File

@ -0,0 +1,658 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include "fdfs_client.h"
#include "fdfs_global.h"
#include "fastcommon/base64.h"
#include "fdfs_http_shared.h"
#include "fastcommon/sockopt.h"
#include "fastcommon/logger.h"
int writeToFileCallback(void *arg, const int64_t file_size, const char *data, \
const int current_size)
{
if (arg == NULL)
{
return EINVAL;
}
if (fwrite(data, current_size, 1, (FILE *)arg) != 1)
{
return errno != 0 ? errno : EIO;
}
return 0;
}
int uploadFileCallback(void *arg, const int64_t file_size, int sock)
{
int64_t total_send_bytes;
char *filename;
if (arg == NULL)
{
return EINVAL;
}
filename = (char *)arg;
return tcpsendfile(sock, filename, file_size, \
g_fdfs_network_timeout, &total_send_bytes);
}
int main(int argc, char *argv[])
{
char *conf_filename;
char *local_filename;
ConnectionInfo *pTrackerServer;
ConnectionInfo *pStorageServer;
int result;
ConnectionInfo storageServer;
char group_name[FDFS_GROUP_NAME_MAX_LEN + 1];
FDFSMetaData meta_list[32];
int meta_count;
int i;
FDFSMetaData *pMetaList;
char token[32 + 1];
char file_id[128];
char master_file_id[128];
char file_url[256];
char szDatetime[20];
char szPortPart[16];
int url_len;
time_t ts;
char *file_buff;
int64_t file_size;
char *operation;
char *meta_buff;
int store_path_index;
FDFSFileInfo file_info;
printf("This is FastDFS client test program v%d.%02d\n" \
"\nCopyright (C) 2008, Happy Fish / YuQing\n" \
"\nFastDFS may be copied only under the terms of the GNU General\n" \
"Public License V3, which may be found in the FastDFS source kit.\n" \
"Please visit the FastDFS Home Page http://www.fastken.com/ \n" \
"for more detail.\n\n" \
, g_fdfs_version.major, g_fdfs_version.minor);
if (argc < 3)
{
printf("Usage: %s <config_file> <operation>\n" \
"\toperation: upload, download, getmeta, setmeta, " \
"delete and query_servers\n", argv[0]);
return 1;
}
log_init();
g_log_context.log_level = LOG_DEBUG;
conf_filename = argv[1];
operation = argv[2];
if ((result=fdfs_client_init(conf_filename)) != 0)
{
return result;
}
pTrackerServer = tracker_get_connection();
if (pTrackerServer == NULL)
{
fdfs_client_destroy();
return errno != 0 ? errno : ECONNREFUSED;
}
local_filename = NULL;
if (strcmp(operation, "upload") == 0)
{
int upload_type;
char *prefix_name;
const char *file_ext_name;
char slave_file_id[256];
int slave_file_id_len;
if (argc < 4)
{
printf("Usage: %s <config_file> upload " \
"<local_filename> [FILE | BUFF | CALLBACK] \n",\
argv[0]);
fdfs_client_destroy();
return EINVAL;
}
local_filename = argv[3];
if (argc == 4)
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
else
{
if (strcmp(argv[4], "BUFF") == 0)
{
upload_type = FDFS_UPLOAD_BY_BUFF;
}
else if (strcmp(argv[4], "CALLBACK") == 0)
{
upload_type = FDFS_UPLOAD_BY_CALLBACK;
}
else
{
upload_type = FDFS_UPLOAD_BY_FILE;
}
}
{
ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
ConnectionInfo *pServer;
ConnectionInfo *pServerEnd;
int storage_count;
strcpy(group_name, "group1");
if ((result=tracker_query_storage_store_list_with_group( \
pTrackerServer, group_name, storageServers, \
FDFS_MAX_SERVERS_EACH_GROUP, &storage_count, \
&store_path_index)) == 0)
{
printf("tracker_query_storage_store_list_with_group: \n");
pServerEnd = storageServers + storage_count;
for (pServer=storageServers; pServer<pServerEnd; pServer++)
{
printf("\tserver %d. group_name=%s, " \
"ip_addr=%s, port=%d\n", \
(int)(pServer - storageServers) + 1, \
group_name, pServer->ip_addr, \
pServer->port);
}
printf("\n");
}
}
*group_name = '\0';
if ((result=tracker_query_storage_store(pTrackerServer, \
&storageServer, group_name, &store_path_index)) != 0)
{
fdfs_client_destroy();
printf("tracker_query_storage fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
return result;
}
printf("group_name=%s, ip_addr=%s, port=%d\n", \
group_name, storageServer.ip_addr, \
storageServer.port);
if ((pStorageServer=tracker_make_connection(&storageServer, \
&result)) == NULL)
{
fdfs_client_destroy();
return result;
}
memset(&meta_list, 0, sizeof(meta_list));
meta_count = 0;
strcpy(meta_list[meta_count].name, "ext_name");
strcpy(meta_list[meta_count].value, "jpg");
meta_count++;
strcpy(meta_list[meta_count].name, "width");
strcpy(meta_list[meta_count].value, "160");
meta_count++;
strcpy(meta_list[meta_count].name, "height");
strcpy(meta_list[meta_count].value, "80");
meta_count++;
strcpy(meta_list[meta_count].name, "file_size");
strcpy(meta_list[meta_count].value, "115120");
meta_count++;
file_ext_name = fdfs_get_file_ext_name(local_filename);
strcpy(group_name, "");
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
printf("storage_upload_by_filename\n");
result = storage_upload_by_filename1(pTrackerServer, \
pStorageServer, store_path_index, \
local_filename, file_ext_name, \
meta_list, meta_count, \
group_name, file_id);
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
printf("storage_upload_by_filebuff\n");
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_upload_by_filebuff1(pTrackerServer, \
pStorageServer, store_path_index, \
file_content, file_size, file_ext_name, \
meta_list, meta_count, \
group_name, file_id);
free(file_content);
}
}
else
{
struct stat stat_buf;
printf("storage_upload_by_callback\n");
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_by_callback1(pTrackerServer, \
pStorageServer, store_path_index, \
uploadFileCallback, local_filename, \
file_size, file_ext_name, \
meta_list, meta_count, \
group_name, file_id);
}
}
if (result != 0)
{
printf("upload file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
if (g_tracker_server_http_port == 80)
{
*szPortPart = '\0';
}
else
{
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
}
url_len = sprintf(file_url, "http://%s%s/%s", \
pStorageServer->ip_addr, szPortPart, file_id);
if (g_anti_steal_token)
{
ts = time(NULL);
fdfs_http_gen_token(&g_anti_steal_secret_key, \
file_id, ts, token);
sprintf(file_url + url_len, "?token=%s&ts=%d", \
token, (int)ts);
}
fdfs_get_file_info1(file_id, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("example file url: %s\n", file_url);
strcpy(master_file_id, file_id);
*file_id = '\0';
if (upload_type == FDFS_UPLOAD_BY_FILE)
{
prefix_name = "_big";
printf("storage_upload_slave_by_filename\n");
result = storage_upload_slave_by_filename1( \
pTrackerServer, NULL, \
local_filename, master_file_id, \
prefix_name, file_ext_name, \
meta_list, meta_count, file_id);
}
else if (upload_type == FDFS_UPLOAD_BY_BUFF)
{
char *file_content;
prefix_name = "1024x1024";
printf("storage_upload_slave_by_filebuff\n");
if ((result=getFileContent(local_filename, \
&file_content, &file_size)) == 0)
{
result = storage_upload_slave_by_filebuff1( \
pTrackerServer, NULL, file_content, file_size, \
master_file_id, prefix_name, file_ext_name, \
meta_list, meta_count, file_id);
free(file_content);
}
}
else
{
struct stat stat_buf;
prefix_name = "_small";
printf("storage_upload_slave_by_callback\n");
if (stat(local_filename, &stat_buf) == 0 && \
S_ISREG(stat_buf.st_mode))
{
file_size = stat_buf.st_size;
result = storage_upload_slave_by_callback1( \
pTrackerServer, NULL, \
uploadFileCallback, local_filename, \
file_size, master_file_id, \
prefix_name, file_ext_name, \
meta_list, meta_count, file_id);
}
}
if (result != 0)
{
printf("upload slave file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
tracker_close_connection_ex(pStorageServer, true);
fdfs_client_destroy();
return result;
}
if (g_tracker_server_http_port == 80)
{
*szPortPart = '\0';
}
else
{
sprintf(szPortPart, ":%d", g_tracker_server_http_port);
}
url_len = sprintf(file_url, "http://%s%s/%s", \
pStorageServer->ip_addr, szPortPart, file_id);
if (g_anti_steal_token)
{
ts = time(NULL);
fdfs_http_gen_token(&g_anti_steal_secret_key, \
file_id, ts, token);
sprintf(file_url + url_len, "?token=%s&ts=%d", \
token, (int)ts);
}
fdfs_get_file_info1(file_id, &file_info);
printf("source ip address: %s\n", file_info.source_ip_addr);
printf("file timestamp=%s\n", formatDatetime(
file_info.create_timestamp, "%Y-%m-%d %H:%M:%S", \
szDatetime, sizeof(szDatetime)));
printf("file size=%"PRId64"\n", file_info.file_size);
printf("file crc32=%u\n", file_info.crc32);
printf("example file url: %s\n", file_url);
if (fdfs_gen_slave_filename(master_file_id, \
prefix_name, file_ext_name, \
slave_file_id, &slave_file_id_len) == 0)
{
if (strcmp(file_id, slave_file_id) != 0)
{
printf("slave_file_id=%s\n" \
"file_id=%s\n" \
"not equal!\n", \
slave_file_id, file_id);
}
}
}
else if (strcmp(operation, "download") == 0 ||
strcmp(operation, "getmeta") == 0 ||
strcmp(operation, "setmeta") == 0 ||
strcmp(operation, "query_servers") == 0 ||
strcmp(operation, "delete") == 0)
{
if (argc < 4)
{
printf("Usage: %s <config_file> %s " \
"<file_id>\n", \
argv[0], operation);
fdfs_client_destroy();
return EINVAL;
}
snprintf(file_id, sizeof(file_id), "%s", argv[3]);
if (strcmp(operation, "query_servers") == 0)
{
ConnectionInfo storageServers[FDFS_MAX_SERVERS_EACH_GROUP];
int server_count;
result = tracker_query_storage_list1(pTrackerServer, \
storageServers, FDFS_MAX_SERVERS_EACH_GROUP, \
&server_count, file_id);
if (result != 0)
{
printf("tracker_query_storage_list1 fail, "\
"file_id=%s, " \
"error no: %d, error info: %s\n", \
file_id, result, STRERROR(result));
}
else
{
printf("server list (%d):\n", server_count);
for (i=0; i<server_count; i++)
{
printf("\t%s:%d\n", \
storageServers[i].ip_addr, \
storageServers[i].port);
}
printf("\n");
}
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}
if ((result=tracker_query_storage_fetch1(pTrackerServer, \
&storageServer, file_id)) != 0)
{
fdfs_client_destroy();
printf("tracker_query_storage_fetch fail, " \
"file_id=%s, " \
"error no: %d, error info: %s\n", \
file_id, result, STRERROR(result));
return result;
}
printf("storage=%s:%d\n", storageServer.ip_addr, \
storageServer.port);
if ((pStorageServer=tracker_make_connection(&storageServer, \
&result)) == NULL)
{
fdfs_client_destroy();
return result;
}
if (strcmp(operation, "download") == 0)
{
if (argc >= 5)
{
local_filename = argv[4];
if (strcmp(local_filename, "CALLBACK") == 0)
{
FILE *fp;
fp = fopen(local_filename, "wb");
if (fp == NULL)
{
result = errno != 0 ? errno : EPERM;
printf("open file \"%s\" fail, " \
"errno: %d, error info: %s", \
local_filename, result, \
STRERROR(result));
}
else
{
result = storage_download_file_ex1( \
pTrackerServer, pStorageServer, \
file_id, 0, 0, \
writeToFileCallback, fp, &file_size);
fclose(fp);
}
}
else
{
result = storage_download_file_to_file1( \
pTrackerServer, pStorageServer, \
file_id, \
local_filename, &file_size);
}
}
else
{
file_buff = NULL;
if ((result=storage_download_file_to_buff1( \
pTrackerServer, pStorageServer, \
file_id, \
&file_buff, &file_size)) == 0)
{
local_filename = strrchr( \
file_id, '/');
if (local_filename != NULL)
{
local_filename++; //skip /
}
else
{
local_filename=file_id;
}
result = writeToFile(local_filename, \
file_buff, file_size);
free(file_buff);
}
}
if (result == 0)
{
printf("download file success, " \
"file size=%"PRId64", file save to %s\n", \
file_size, local_filename);
}
else
{
printf("download file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
else if (strcmp(operation, "getmeta") == 0)
{
if ((result=storage_get_metadata1(pTrackerServer, \
NULL, file_id, \
&pMetaList, &meta_count)) == 0)
{
printf("get meta data success, " \
"meta count=%d\n", meta_count);
for (i=0; i<meta_count; i++)
{
printf("%s=%s\n", \
pMetaList[i].name, \
pMetaList[i].value);
}
free(pMetaList);
}
else
{
printf("getmeta fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
else if (strcmp(operation, "setmeta") == 0)
{
if (argc < 6)
{
printf("Usage: %s <config_file> %s " \
"<file_id> " \
"<op_flag> <metadata_list>\n" \
"\top_flag: %c for overwrite, " \
"%c for merge\n" \
"\tmetadata_list: name1=value1," \
"name2=value2,...\n", \
argv[0], operation, \
STORAGE_SET_METADATA_FLAG_OVERWRITE, \
STORAGE_SET_METADATA_FLAG_MERGE);
fdfs_client_destroy();
return EINVAL;
}
meta_buff = strdup(argv[5]);
if (meta_buff == NULL)
{
printf("Out of memory!\n");
return ENOMEM;
}
pMetaList = fdfs_split_metadata_ex(meta_buff, \
',', '=', &meta_count, &result);
if (pMetaList == NULL)
{
printf("Out of memory!\n");
free(meta_buff);
return ENOMEM;
}
if ((result=storage_set_metadata1(pTrackerServer, \
NULL, file_id, \
pMetaList, meta_count, *argv[4])) == 0)
{
printf("set meta data success\n");
}
else
{
printf("setmeta fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
free(meta_buff);
free(pMetaList);
}
else if(strcmp(operation, "delete") == 0)
{
if ((result=storage_delete_file1(pTrackerServer, \
NULL, file_id)) == 0)
{
printf("delete file success\n");
}
else
{
printf("delete file fail, " \
"error no: %d, error info: %s\n", \
result, STRERROR(result));
}
}
}
else
{
fdfs_client_destroy();
printf("invalid operation: %s\n", operation);
return EINVAL;
}
/* for test only */
if ((result=fdfs_active_test(pTrackerServer)) != 0)
{
printf("active_test to tracker server %s:%d fail, errno: %d\n", \
pTrackerServer->ip_addr, pTrackerServer->port, result);
}
/* for test only */
if ((result=fdfs_active_test(pStorageServer)) != 0)
{
printf("active_test to storage server %s:%d fail, errno: %d\n", \
pStorageServer->ip_addr, pStorageServer->port, result);
}
tracker_close_connection_ex(pStorageServer, true);
tracker_close_connection_ex(pTrackerServer, true);
fdfs_client_destroy();
return result;
}

File diff suppressed because it is too large

View File

@ -0,0 +1,363 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#ifndef TRACKER_CLIENT_H
#define TRACKER_CLIENT_H
#include "tracker_types.h"
#include "tracker_proto.h"
#include "client_global.h"
#ifdef __cplusplus
extern "C" {
#endif
typedef struct
{
bool if_trunk_server;
char status;
char id[FDFS_STORAGE_ID_MAX_SIZE];
char ip_addr[IP_ADDRESS_SIZE];
char src_id[FDFS_STORAGE_ID_MAX_SIZE]; //src storage id
char domain_name[FDFS_DOMAIN_NAME_MAX_SIZE]; //http domain name
char version[FDFS_VERSION_SIZE];
int total_mb; //total disk storage in MB
int free_mb; //free disk storage in MB
int upload_priority; //upload priority
time_t join_time; //storage join timestamp (create timestamp)
time_t up_time; //storage service started timestamp
int store_path_count; //store base path count of each storage server
int subdir_count_per_path;
int storage_port;
int storage_http_port; //storage http server port
int current_write_path; //current write path index
FDFSStorageStat stat;
} FDFSStorageInfo;
#define CHECK_CONNECTION(pTrackerServer, conn, result, new_connection) \
do { \
if (pTrackerServer->sock < 0) \
{ \
if ((conn=tracker_make_connection( \
pTrackerServer, &result)) == NULL) \
{ \
return result; \
} \
new_connection = true; \
} \
else \
{ \
conn = pTrackerServer; \
new_connection = false; \
} \
} while (0)
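/* Usage sketch for CHECK_CONNECTION (illustrative; it assumes the caller
 * closes the connection itself when new_connection is true):
 *
 *     ConnectionInfo *conn;
 *     bool new_connection;
 *     int result;
 *
 *     CHECK_CONNECTION(pTrackerServer, conn, result, new_connection);
 *     // ... exchange packets with the tracker through conn ...
 *     if (new_connection)
 *     {
 *         tracker_close_connection_ex(conn, true);
 *     }
 */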
#define tracker_get_connection() \
tracker_get_connection_ex((&g_tracker_group))
/**
* get a connection to tracker server
* params:
* pTrackerGroup: the tracker group
* return: != NULL for success, NULL for fail
**/
ConnectionInfo *tracker_get_connection_ex(TrackerServerGroup *pTrackerGroup);
#define tracker_get_connection_r(pTrackerServer, err_no) \
tracker_get_connection_r_ex((&g_tracker_group), pTrackerServer, err_no)
/**
* get a connection to tracker server
* params:
* pTrackerGroup: the tracker group
* pTrackerServer: tracker server
* err_no: return the error number
* return: != NULL for success, NULL for fail
**/
ConnectionInfo *tracker_get_connection_r_ex(TrackerServerGroup *pTrackerGroup, \
TrackerServerInfo *pTrackerServer, int *err_no);
#define tracker_get_all_connections() \
tracker_get_all_connections_ex((&g_tracker_group))
/**
* get a connection to tracker server without connection pool
* params:
* pTrackerGroup: the tracker group
* return: != NULL for success, NULL for fail
**/
ConnectionInfo *tracker_get_connection_no_pool( \
TrackerServerGroup *pTrackerGroup);
/**
* connect to all tracker servers
* params:
* pTrackerGroup: the tracker group
* return: 0 success, !=0 fail, return the error code
**/
int tracker_get_all_connections_ex(TrackerServerGroup *pTrackerGroup);
#define tracker_close_all_connections() \
tracker_close_all_connections_ex((&g_tracker_group))
/**
* close all connections to tracker servers
* params:
* pTrackerGroup: the tracker group
* return:
**/
void tracker_close_all_connections_ex(TrackerServerGroup *pTrackerGroup);
/**
* list one group
* params:
* pTrackerServer: tracker server
* group_name: the group name
* pDest: return the group info
* return: 0 success, !=0 fail, return the error code
**/
int tracker_list_one_group(ConnectionInfo *pTrackerServer, \
const char *group_name, FDFSGroupStat *pDest);
/**
* list all groups
* params:
* pTrackerServer: tracker server
* group_stats: return group info array
* max_groups: max group count(group array capacity)
* group_count: return group count
* return: 0 success, !=0 fail, return the error code
**/
int tracker_list_groups(ConnectionInfo *pTrackerServer, \
FDFSGroupStat *group_stats, const int max_groups, \
int *group_count);
/**
* list all servers of the specified group
* params:
* pTrackerServer: tracker server
* szGroupName: group name to query
* szStorageId: the storage id to query, can be NULL or empty
* storage_infos: return storage info array
* max_storages: max storage count(storage array capacity)
* storage_count: return storage count
* return: 0 success, !=0 fail, return the error code
**/
int tracker_list_servers(ConnectionInfo *pTrackerServer, \
const char *szGroupName, const char *szStorageId, \
FDFSStorageInfo *storage_infos, const int max_storages, \
int *storage_count);
#define tracker_query_storage_store(pTrackerServer, pStorageServer, \
group_name, store_path_index) \
tracker_query_storage_store_without_group(pTrackerServer, \
pStorageServer, group_name, store_path_index)
/**
* query storage server to upload file
* params:
* pTrackerServer: tracker server
* pStorageServer: return storage server
* group_name: return the group name to upload the file to
* store_path_index: return the index of path on the storage server
* return: 0 success, !=0 fail, return the error code
**/
int tracker_query_storage_store_without_group(ConnectionInfo *pTrackerServer,
ConnectionInfo *pStorageServer, char *group_name,
int *store_path_index);
/**
* query storage servers/list to upload file
* params:
* pTrackerServer: tracker server
* storageServers: store the storage server list
* nMaxServerCount: max storage server count
* storage_count: return the storage server count
* store_path_index: return the index of path on the storage server
* return: 0 success, !=0 fail, return the error code
**/
#define tracker_query_storage_store_list_without_group( \
pTrackerServer, storageServers, nMaxServerCount, \
storage_count, group_name, store_path_index) \
tracker_query_storage_store_list_with_group( \
pTrackerServer, NULL, storageServers, nMaxServerCount, \
storage_count, store_path_index)
/**
* query storage server to upload file
* params:
* pTrackerServer: tracker server
* group_name: the group name to upload file to
* pStorageServer: return storage server
* store_path_index: return the index of path on the storage server
* return: 0 success, !=0 fail, return the error code
**/
int tracker_query_storage_store_with_group(ConnectionInfo *pTrackerServer, \
const char *group_name, ConnectionInfo *pStorageServer, \
int *store_path_index);
/**
* query storage servers/list to upload file
* params:
* pTrackerServer: tracker server
* group_name: the group name to upload file to
* storageServers: store the storage server list
* nMaxServerCount: max storage server count
* storage_count: return the storage server count
* store_path_index: return the index of path on the storage server
* return: 0 success, !=0 fail, return the error code
**/
int tracker_query_storage_store_list_with_group( \
ConnectionInfo *pTrackerServer, const char *group_name, \
ConnectionInfo *storageServers, const int nMaxServerCount, \
int *storage_count, int *store_path_index);
/**
* query storage server to update (delete file or set meta data)
* params:
* pTrackerServer: tracker server
* pStorageServer: return storage server
* group_name: the group name of storage server
* filename: filename on storage server
* return: 0 success, !=0 fail, return the error code
**/
#define tracker_query_storage_update(pTrackerServer, \
pStorageServer, group_name, filename) \
tracker_do_query_storage(pTrackerServer, \
pStorageServer, TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE,\
group_name, filename)
/**
* query storage server to download file
* params:
* pTrackerServer: tracker server
* pStorageServer: return storage server
* group_name: the group name of storage server
* filename: filename on storage server
* return: 0 success, !=0 fail, return the error code
**/
#define tracker_query_storage_fetch(pTrackerServer, \
pStorageServer, group_name, filename) \
tracker_do_query_storage(pTrackerServer, \
pStorageServer, TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE,\
group_name, filename)
/**
* query storage server to fetch or update
* params:
* pTrackerServer: tracker server
* pStorageServer: return storage server
* cmd : command, TRACKER_PROTO_CMD_SERVICE_QUERY_FETCH_ONE or
* TRACKER_PROTO_CMD_SERVICE_QUERY_UPDATE
* group_name: the group name of storage server
* filename: filename on storage server
* return: 0 success, !=0 fail, return the error code
**/
int tracker_do_query_storage(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const byte cmd, \
const char *group_name, const char *filename);
/**
* query storage server list to fetch file
* params:
* pTrackerServer: tracker server
* pStorageServer: return storage server
* nMaxServerCount: max storage server count
* server_count: return storage server count
* group_name: the group name of storage server
* filename: filename on storage server
* return: 0 success, !=0 fail, return the error code
**/
int tracker_query_storage_list(ConnectionInfo *pTrackerServer, \
ConnectionInfo *pStorageServer, const int nMaxServerCount, \
int *server_count, char *group_name, const char *filename);
/**
* delete a storage server from cluster
* params:
* pTrackerGroup: the tracker group
* group_name: the group name which the storage server belongs to
* storage_id: the storage server id
* return: 0 success, !=0 fail, return the error code
**/
int tracker_delete_storage(TrackerServerGroup *pTrackerGroup, \
const char *group_name, const char *storage_id);
/**
* delete a group from cluster
* params:
* pTrackerGroup: the tracker group
* group_name: the group name to delete
* return: 0 success, !=0 fail, return the error code
**/
int tracker_delete_group(TrackerServerGroup *pTrackerGroup, \
const char *group_name);
/**
* set trunk server of the specified group
* params:
* pTrackerGroup: the tracker group
* group_name: the group name which the storage server belongs to
* storage_id: the storage server id, can be NULL or empty
* new_trunk_server_id: the new trunk server id
* return: 0 success, !=0 fail, return the error code
**/
int tracker_set_trunk_server(TrackerServerGroup *pTrackerGroup, \
const char *group_name, const char *storage_id, \
char *new_trunk_server_id);
/**
* get storage server status from the tracker server
* params:
* pTrackerServer: tracker server
* group_name: the group name which the storage server belongs to
* ip_addr: the ip addr of the storage server
* pDestBuff: return the storage server brief info
* return: 0 success, !=0 fail, return the error code
**/
int tracker_get_storage_status(ConnectionInfo *pTrackerServer, \
const char *group_name, const char *ip_addr, \
FDFSStorageBrief *pDestBuff);
/**
* get storage server id from the tracker server
* params:
* pTrackerServer: tracker server
* group_name: the group name which the storage server belongs to
* ip_addr: the ip addr of the storage server
* storage_id: return the storage server id
* return: 0 success, !=0 fail, return the error code
**/
int tracker_get_storage_id(ConnectionInfo *pTrackerServer, \
const char *group_name, const char *ip_addr, \
char *storage_id);
/**
* get storage server highest level status from all tracker servers
* params:
* pTrackerGroup: the tracker group
* group_name: the group name which the storage server belongs to
* ip_addr: the ip addr of the storage server
* storage_id: return the storage server id
* status: return the highest level status
* return: 0 success, !=0 fail, return the error code
**/
int tracker_get_storage_max_status(TrackerServerGroup *pTrackerGroup, \
const char *group_name, const char *ip_addr, \
char *storage_id, int *status);
#ifdef __cplusplus
}
#endif
#endif
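/* A minimal usage sketch for this header (illustrative only; error handling
 * and fdfs_client_init() are assumed to have been done already):
 *
 *     FDFSGroupStat group_stats[FDFS_MAX_GROUPS];
 *     ConnectionInfo *pTrackerServer;
 *     int group_count;
 *
 *     pTrackerServer = tracker_get_connection();
 *     if (pTrackerServer != NULL &&
 *         tracker_list_groups(pTrackerServer, group_stats,
 *                 FDFS_MAX_GROUPS, &group_count) == 0)
 *     {
 *         printf("group count: %d\n", group_count);
 *     }
 *     tracker_close_all_connections();
 */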

28
JCEC-fastdfs/common/Makefile Executable file
View File

@ -0,0 +1,28 @@
.SUFFIXES: .c .o
COMPILE = $(CC) -Wall -O2 -D_FILE_OFFSET_BITS=64 -DOS_LINUX
#COMPILE = $(CC) -Wall -g -D_FILE_OFFSET_BITS=64 -DOS_LINUX -D__DEBUG__
INC_PATH = -I/usr/local/include
LIB_PATH = -L/usr/local/lib
TARGET_PATH = /usr/local/bin
COMMON_LIB =
SHARED_OBJS = hash.o chain.o shared_func.o ini_file_reader.o \
logger.o sockopt.o fdfs_global.o base64.o sched_thread.o \
mime_file_parser.o fdfs_http_shared.o
ALL_OBJS = $(SHARED_OBJS)
ALL_PRGS =
all: $(ALL_OBJS) $(ALL_PRGS)
.o:
$(COMPILE) -o $@ $< $(SHARED_OBJS) $(COMMON_LIB) $(LIB_PATH) $(INC_PATH)
.c:
$(COMPILE) -o $@ $< $(ALL_OBJS) $(COMMON_LIB) $(LIB_PATH) $(INC_PATH)
.c.o:
$(COMPILE) -c -o $@ $< $(INC_PATH)
install:
cp -f $(ALL_PRGS) $(TARGET_PATH)
clean:
rm -f $(ALL_OBJS) $(ALL_PRGS)

View File

@ -0,0 +1,42 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//fdfs_define.h
#ifndef _FDFS_DEFINE_H_
#define _FDFS_DEFINE_H_
#include <pthread.h>
#include "fastcommon/common_define.h"
#define FDFS_TRACKER_SERVER_DEF_PORT 22122
#define FDFS_STORAGE_SERVER_DEF_PORT 23000
#define FDFS_DEF_STORAGE_RESERVED_MB 1024
#define TRACKER_ERROR_LOG_FILENAME "trackerd"
#define STORAGE_ERROR_LOG_FILENAME "storaged"
#define FDFS_RECORD_SEPERATOR '\x01'
#define FDFS_FIELD_SEPERATOR '\x02'
#define SYNC_BINLOG_BUFF_DEF_INTERVAL 60
#define CHECK_ACTIVE_DEF_INTERVAL 100
#define DEFAULT_STORAGE_SYNC_FILE_MAX_DELAY 86400
#define DEFAULT_STORAGE_SYNC_FILE_MAX_TIME 300
#ifdef __cplusplus
extern "C" {
#endif
#ifdef __cplusplus
}
#endif
#endif

149
JCEC-fastdfs/common/fdfs_global.c Executable file
View File

@ -0,0 +1,149 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <fcntl.h>
#include <errno.h>
#include "fastcommon/logger.h"
#include "fdfs_global.h"
int g_fdfs_connect_timeout = DEFAULT_CONNECT_TIMEOUT;
int g_fdfs_network_timeout = DEFAULT_NETWORK_TIMEOUT;
char g_fdfs_base_path[MAX_PATH_SIZE] = {'/', 't', 'm', 'p', '\0'};
Version g_fdfs_version = {6, 6};
bool g_use_connection_pool = false;
ConnectionPool g_connection_pool;
int g_connection_pool_max_idle_time = 3600;
/*
data filename format:
HH/HH/filename: HH for 2 uppercase hex chars
*/
int fdfs_check_data_filename(const char *filename, const int len)
{
if (len < 6)
{
logError("file: "__FILE__", line: %d, " \
"the length=%d of filename \"%s\" is too short", \
__LINE__, len, filename);
return EINVAL;
}
if (!IS_UPPER_HEX(*filename) || !IS_UPPER_HEX(*(filename+1)) || \
*(filename+2) != '/' || \
!IS_UPPER_HEX(*(filename+3)) || !IS_UPPER_HEX(*(filename+4)) || \
*(filename+5) != '/')
{
logError("file: "__FILE__", line: %d, " \
"the format of filename \"%s\" is invalid", \
__LINE__, filename);
return EINVAL;
}
if (strchr(filename + 6, '/') != NULL)
{
logError("file: "__FILE__", line: %d, " \
"the format of filename \"%s\" is invalid", \
__LINE__, filename);
return EINVAL;
}
return 0;
}
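/* Example (a hedged sketch, not from the original source): a data filename
 * such as "0A/2B/wKgBAF0123456789abc.jpg" passes the check above, while
 * "0a/2b/name.jpg" (lowercase hex) or "0A/2B/x/y.jpg" (extra '/') is
 * rejected with EINVAL. */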
int fdfs_gen_slave_filename(const char *master_filename, \
const char *prefix_name, const char *ext_name, \
char *filename, int *filename_len)
{
char true_ext_name[FDFS_FILE_EXT_NAME_MAX_LEN + 2];
char *pDot;
int master_file_len;
master_file_len = strlen(master_filename);
if (master_file_len < 28 + FDFS_FILE_EXT_NAME_MAX_LEN)
{
logError("file: "__FILE__", line: %d, " \
"master filename \"%s\" is invalid", \
__LINE__, master_filename);
return EINVAL;
}
pDot = strchr(master_filename + (master_file_len - \
(FDFS_FILE_EXT_NAME_MAX_LEN + 1)), '.');
if (ext_name != NULL)
{
if (*ext_name == '\0')
{
*true_ext_name = '\0';
}
else if (*ext_name == '.')
{
snprintf(true_ext_name, sizeof(true_ext_name), \
"%s", ext_name);
}
else
{
snprintf(true_ext_name, sizeof(true_ext_name), \
".%s", ext_name);
}
}
else
{
if (pDot == NULL)
{
*true_ext_name = '\0';
}
else
{
strcpy(true_ext_name, pDot);
}
}
if (*true_ext_name == '\0' && strcmp(prefix_name, "-m") == 0)
{
logError("file: "__FILE__", line: %d, " \
"prefix_name \"%s\" is invalid", \
__LINE__, prefix_name);
return EINVAL;
}
/* when prefix_name is empty, the extension name of master file and
slave file can not be same
*/
if ((*prefix_name == '\0') && ((pDot == NULL && *true_ext_name == '\0')
|| (pDot != NULL && strcmp(pDot, true_ext_name) == 0)))
{
logError("file: "__FILE__", line: %d, " \
"empty prefix_name is not allowed", __LINE__);
return EINVAL;
}
if (pDot == NULL)
{
*filename_len = sprintf(filename, "%s%s%s", master_filename, \
prefix_name, true_ext_name);
}
else
{
*filename_len = pDot - master_filename;
memcpy(filename, master_filename, *filename_len);
*filename_len += sprintf(filename + *filename_len, "%s%s", \
prefix_name, true_ext_name);
}
return 0;
}
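/* Usage sketch (illustrative values, not part of the original source):
 *
 *   char slave_filename[256];
 *   int slave_filename_len;
 *   fdfs_gen_slave_filename("M00/00/00/wKgAAUpyOQmAXaCWAAAB9MasterXYZ.jpg",
 *           "_150x150", "jpg", slave_filename, &slave_filename_len);
 *   // slave_filename => "M00/00/00/wKgAAUpyOQmAXaCWAAAB9MasterXYZ_150x150.jpg"
 *
 * The master filename must be at least 28 + FDFS_FILE_EXT_NAME_MAX_LEN
 * characters long, as checked above, and the caller's buffer must be large
 * enough to hold the generated name. */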

View File

@ -0,0 +1,42 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//fdfs_global.h
#ifndef _FDFS_GLOBAL_H
#define _FDFS_GLOBAL_H
#include "fastcommon/common_define.h"
#include "fdfs_define.h"
#include "fastcommon/connection_pool.h"
#define FDFS_FILE_EXT_NAME_MAX_LEN 6
#ifdef __cplusplus
extern "C" {
#endif
extern int g_fdfs_connect_timeout;
extern int g_fdfs_network_timeout;
extern char g_fdfs_base_path[MAX_PATH_SIZE];
extern Version g_fdfs_version;
extern bool g_use_connection_pool;
extern ConnectionPool g_connection_pool;
extern int g_connection_pool_max_idle_time;
int fdfs_check_data_filename(const char *filename, const int len);
int fdfs_gen_slave_filename(const char *master_filename, \
const char *prefix_name, const char *ext_name, \
char *filename, int *filename_len);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,377 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <fcntl.h>
#include "fastcommon/logger.h"
#include "fastcommon/md5.h"
#include "fastcommon/shared_func.h"
#include "mime_file_parser.h"
#include "fdfs_global.h"
#include "fdfs_http_shared.h"
const char *fdfs_http_get_file_extension(const char *filename, \
const int filename_len, int *ext_len)
{
const char *pEnd;
const char *pExtName;
int i;
pEnd = filename + filename_len;
pExtName = pEnd - 1;
for (i=0; i<FDFS_FILE_EXT_NAME_MAX_LEN && pExtName >= filename; \
i++, pExtName--)
{
if (*pExtName == '.')
{
break;
}
}
if (i < FDFS_FILE_EXT_NAME_MAX_LEN) //found
{
pExtName++; //skip .
*ext_len = pEnd - pExtName;
return pExtName;
}
else
{
*ext_len = 0;
return NULL;
}
}
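/* Illustrative: for "a/b/photo.jpeg" this returns a pointer to "jpeg" with
 * *ext_len == 4; if no '.' occurs within the last FDFS_FILE_EXT_NAME_MAX_LEN
 * characters, NULL is returned and *ext_len is set to 0. */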
int fdfs_http_get_content_type_by_extname(FDFSHTTPParams *pParams, \
const char *ext_name, const int ext_len, \
char *content_type, const int content_type_size)
{
HashData *pHashData;
if (ext_len == 0)
{
logWarning("file: "__FILE__", line: %d, " \
"extension name is empty, " \
"set to default content type: %s", \
__LINE__, pParams->default_content_type);
strcpy(content_type, pParams->default_content_type);
return 0;
}
pHashData = hash_find_ex(&pParams->content_type_hash, \
ext_name, ext_len + 1);
if (pHashData == NULL)
{
logWarning("file: "__FILE__", line: %d, " \
"extension name: %s is not supported, " \
"set to default content type: %s", \
__LINE__, ext_name, pParams->default_content_type);
strcpy(content_type, pParams->default_content_type);
return 0;
}
if (pHashData->value_len >= content_type_size)
{
*content_type = '\0';
logError("file: "__FILE__", line: %d, " \
"extension name: %s 's content type " \
"is too long", __LINE__, ext_name);
return EINVAL;
}
memcpy(content_type, pHashData->value, pHashData->value_len);
return 0;
}
int fdfs_http_params_load(IniContext *pIniContext, \
const char *conf_filename, FDFSHTTPParams *pParams)
{
int result;
int ext_len;
const char *ext_name;
char *mime_types_filename;
char szMimeFilename[256];
char *anti_steal_secret_key;
char *token_check_fail_filename;
char *default_content_type;
int def_content_type_len;
int64_t file_size;
memset(pParams, 0, sizeof(FDFSHTTPParams));
pParams->disabled = iniGetBoolValue(NULL, "http.disabled", \
pIniContext, false);
if (pParams->disabled)
{
return 0;
}
pParams->need_find_content_type = iniGetBoolValue(NULL, \
"http.need_find_content_type", \
pIniContext, true);
pParams->support_multi_range = iniGetBoolValue(NULL, \
"http.multi_range.enabed", \
pIniContext, true);
pParams->server_port = iniGetIntValue(NULL, "http.server_port", \
pIniContext, 80);
if (pParams->server_port <= 0)
{
logError("file: "__FILE__", line: %d, " \
"invalid param \"http.server_port\": %d", \
__LINE__, pParams->server_port);
return EINVAL;
}
pParams->anti_steal_token = iniGetBoolValue(NULL, \
"http.anti_steal.check_token", \
pIniContext, false);
if (pParams->need_find_content_type || pParams->anti_steal_token ||
pParams->support_multi_range)
{
mime_types_filename = iniGetStrValue(NULL, "http.mime_types_filename", \
pIniContext);
if (mime_types_filename == NULL || *mime_types_filename == '\0')
{
logError("file: "__FILE__", line: %d, " \
"param \"http.mime_types_filename\" not exist " \
"or is empty", __LINE__);
return EINVAL;
}
if (strncasecmp(mime_types_filename, "http://", 7) != 0 && \
*mime_types_filename != '/' && \
strncasecmp(conf_filename, "http://", 7) != 0)
{
char *pPathEnd;
pPathEnd = strrchr(conf_filename, '/');
if (pPathEnd == NULL)
{
snprintf(szMimeFilename, sizeof(szMimeFilename), \
"%s", mime_types_filename);
}
else
{
int nPathLen;
int nFilenameLen;
nPathLen = (pPathEnd - conf_filename) + 1;
nFilenameLen = strlen(mime_types_filename);
if (nPathLen + nFilenameLen >= sizeof(szMimeFilename))
{
logError("file: "__FILE__", line: %d, " \
"filename is too long, length %d >= %d",
__LINE__, nPathLen + nFilenameLen, \
(int)sizeof(szMimeFilename));
return ENOSPC;
}
memcpy(szMimeFilename, conf_filename, nPathLen);
memcpy(szMimeFilename + nPathLen, mime_types_filename, \
nFilenameLen);
*(szMimeFilename + nPathLen + nFilenameLen) = '\0';
}
}
else
{
snprintf(szMimeFilename, sizeof(szMimeFilename), \
"%s", mime_types_filename);
}
result = load_mime_types_from_file(&pParams->content_type_hash, \
szMimeFilename);
if (result != 0)
{
return result;
}
default_content_type = iniGetStrValue(NULL, \
"http.default_content_type", \
pIniContext);
if (default_content_type == NULL || *default_content_type == '\0')
{
logError("file: "__FILE__", line: %d, " \
"param \"http.default_content_type\" not exist " \
"or is empty", __LINE__);
return EINVAL;
}
def_content_type_len = strlen(default_content_type);
if (def_content_type_len >= sizeof(pParams->default_content_type))
{
logError("file: "__FILE__", line: %d, " \
"default content type: %s is too long", \
__LINE__, default_content_type);
return EINVAL;
}
memcpy(pParams->default_content_type, default_content_type, \
def_content_type_len);
}
if (!pParams->anti_steal_token)
{
return 0;
}
pParams->token_ttl = iniGetIntValue(NULL, \
"http.anti_steal.token_ttl", \
pIniContext, 600);
if (pParams->token_ttl <= 0)
{
logError("file: "__FILE__", line: %d, " \
"param \"http.anti_steal.token_ttl\" is invalid", \
__LINE__);
return EINVAL;
}
anti_steal_secret_key = iniGetStrValue(NULL, \
"http.anti_steal.secret_key", \
pIniContext);
if (anti_steal_secret_key == NULL || *anti_steal_secret_key == '\0')
{
logError("file: "__FILE__", line: %d, " \
"param \"http.anti_steal.secret_key\" not exist " \
"or is empty", __LINE__);
return EINVAL;
}
buffer_strcpy(&pParams->anti_steal_secret_key, anti_steal_secret_key);
token_check_fail_filename = iniGetStrValue(NULL, \
"http.anti_steal.token_check_fail", \
pIniContext);
if (token_check_fail_filename == NULL || \
*token_check_fail_filename == '\0')
{
return 0;
}
if (!fileExists(token_check_fail_filename))
{
logError("file: "__FILE__", line: %d, " \
"token_check_fail file: %s not exists", __LINE__, \
token_check_fail_filename);
return ENOENT;
}
ext_name = fdfs_http_get_file_extension(token_check_fail_filename, \
strlen(token_check_fail_filename), &ext_len);
if ((result=fdfs_http_get_content_type_by_extname(pParams, \
ext_name, ext_len, \
pParams->token_check_fail_content_type, \
sizeof(pParams->token_check_fail_content_type))) != 0)
{
return result;
}
if (!(pParams->need_find_content_type || pParams->support_multi_range))
{
hash_destroy(&pParams->content_type_hash);
}
if ((result=getFileContent(token_check_fail_filename, \
&pParams->token_check_fail_buff.buff, &file_size)) != 0)
{
return result;
}
pParams->token_check_fail_buff.alloc_size = file_size;
pParams->token_check_fail_buff.length = file_size;
return 0;
}
void fdfs_http_params_destroy(FDFSHTTPParams *pParams)
{
if (!(pParams->need_find_content_type || pParams->support_multi_range))
{
hash_destroy(&pParams->content_type_hash);
}
}
int fdfs_http_gen_token(const BufferInfo *secret_key, const char *file_id, \
const int timestamp, char *token)
{
char buff[256 + 64];
unsigned char digit[16];
int id_len;
int total_len;
id_len = strlen(file_id);
if (id_len + secret_key->length + 12 > sizeof(buff))
{
return ENOSPC;
}
memcpy(buff, file_id, id_len);
total_len = id_len;
memcpy(buff + total_len, secret_key->buff, secret_key->length);
total_len += secret_key->length;
total_len += sprintf(buff + total_len, "%d", timestamp);
my_md5_buffer(buff, total_len, digit);
bin2hex((char *)digit, 16, token);
return 0;
}
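/* In short, the token produced above is md5(file_id + secret_key + timestamp),
 * rendered by bin2hex() as a 32-character hex string; fdfs_http_check_token()
 * below simply recomputes it and compares. */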
int fdfs_http_check_token(const BufferInfo *secret_key, const char *file_id, \
const int timestamp, const char *token, const int ttl)
{
char true_token[33];
int result;
int token_len;
token_len = strlen(token);
if (token_len != 32)
{
return EINVAL;
}
if ((timestamp != 0) && (time(NULL) - timestamp > ttl))
{
return ETIMEDOUT;
}
if ((result=fdfs_http_gen_token(secret_key, file_id, \
timestamp, true_token)) != 0)
{
return result;
}
return (memcmp(token, true_token, 32) == 0) ? 0 : EPERM;
}
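/* Note: a timestamp of 0 skips the TTL check above, matching the
 * "0 for never timeout" convention documented in fdfs_http_shared.h. */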
char *fdfs_http_get_parameter(const char *param_name, KeyValuePair *params, \
const int param_count)
{
KeyValuePair *pCurrent;
KeyValuePair *pEnd;
pEnd = params + param_count;
for (pCurrent=params; pCurrent<pEnd; pCurrent++)
{
if (strcmp(pCurrent->key, param_name) == 0)
{
return pCurrent->value;
}
}
return NULL;
}

View File

@ -0,0 +1,128 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#ifndef _FDFS_HTTP_SHARED_H
#define _FDFS_HTTP_SHARED_H
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "fastcommon/ini_file_reader.h"
#include "fastcommon/hash.h"
typedef struct
{
bool disabled;
bool anti_steal_token;
/* if need find content type by file extension name */
bool need_find_content_type;
/* if support multi range */
bool support_multi_range;
/* the web server port */
int server_port;
/* key is file ext name, value is content type */
HashArray content_type_hash;
BufferInfo anti_steal_secret_key;
BufferInfo token_check_fail_buff;
char default_content_type[64];
char token_check_fail_content_type[64];
int token_ttl;
} FDFSHTTPParams;
#ifdef __cplusplus
extern "C" {
#endif
/**
load HTTP params from conf file
params:
pIniContext: the ini file items, return by iniLoadItems
conf_filename: config filename
pHTTPParams: the HTTP params
return: 0 for success, != 0 fail
**/
int fdfs_http_params_load(IniContext *pIniContext, \
const char *conf_filename, FDFSHTTPParams *pHTTPParams);
void fdfs_http_params_destroy(FDFSHTTPParams *pParams);
/**
generate anti-steal token
params:
secret_key: secret key buffer
file_id: FastDFS file id
timestamp: current timestamp, unix timestamp (seconds), 0 for never timeout
token: return token buffer
return: 0 for success, != 0 fail
**/
int fdfs_http_gen_token(const BufferInfo *secret_key, const char *file_id, \
const int timestamp, char *token);
/**
check anti-steal token
params:
secret_key: secret key buffer
file_id: FastDFS file id
timestamp: the timestamp to generate the token, unix timestamp (seconds)
token: token buffer
ttl: token ttl, delta seconds
return: 0 for passed, != 0 fail
**/
int fdfs_http_check_token(const BufferInfo *secret_key, const char *file_id, \
const int timestamp, const char *token, const int ttl);
/**
get parameter value
params:
param_name: the parameter name to get
params: parameter array
param_count: param count
return: param value pointer, return NULL if not exist
**/
char *fdfs_http_get_parameter(const char *param_name, KeyValuePair *params, \
const int param_count);
/**
get file extension name
params:
filename: the filename
filename_len: the length of filename
ext_len: return the length of extension name
return: extension name, NULL for none
**/
const char *fdfs_http_get_file_extension(const char *filename, \
const int filename_len, int *ext_len);
/**
get content type by file extension name
params:
pHTTPParams: the HTTP params
ext_name: the extension name
ext_len: the length of extension name
content_type: return content type
content_type_size: content type buffer size
return: 0 for success, != 0 fail
**/
int fdfs_http_get_content_type_by_extname(FDFSHTTPParams *pParams, \
const char *ext_name, const int ext_len, \
char *content_type, const int content_type_size);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,131 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <errno.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <netinet/in.h>
#include <fcntl.h>
#include "fastcommon/logger.h"
#include "fastcommon/http_func.h"
#include "fastcommon/shared_func.h"
#include "mime_file_parser.h"
int load_mime_types_from_file(HashArray *pHash, const char *mime_filename)
{
#define MIME_DELIM_CHARS " \t"
int result;
char *content;
char *pLine;
char *pLastEnd;
char *content_type;
char *ext_name;
char *lasts;
int http_status;
int content_len;
int64_t file_size;
char error_info[512];
if (strncasecmp(mime_filename, "http://", 7) == 0)
{
if ((result=get_url_content(mime_filename, 30, 60, &http_status,\
&content, &content_len, error_info)) != 0)
{
logError("file: "__FILE__", line: %d, " \
"get_url_content fail, " \
"url: %s, error info: %s", \
__LINE__, mime_filename, error_info);
return result;
}
if (http_status != 200)
{
free(content);
logError("file: "__FILE__", line: %d, " \
"HTTP status code: %d != 200, url: %s", \
__LINE__, http_status, mime_filename);
return EINVAL;
}
}
else
{
if ((result=getFileContent(mime_filename, &content, \
&file_size)) != 0)
{
return result;
}
}
if ((result=hash_init_ex(pHash, PJWHash, 2 * 1024, 0.75, 0, true)) != 0)
{
free(content);
logError("file: "__FILE__", line: %d, " \
"hash_init_ex fail, errno: %d, error info: %s", \
__LINE__, result, STRERROR(result));
return result;
}
pLastEnd = content - 1;
while (pLastEnd != NULL)
{
pLine = pLastEnd + 1;
pLastEnd = strchr(pLine, '\n');
if (pLastEnd != NULL)
{
*pLastEnd = '\0';
}
if (*pLine == '\0' || *pLine == '#')
{
continue;
}
lasts = NULL;
content_type = strtok_r(pLine, MIME_DELIM_CHARS, &lasts);
while (1)
{
ext_name = strtok_r(NULL, MIME_DELIM_CHARS, &lasts);
if (ext_name == NULL)
{
break;
}
if (*ext_name == '\0')
{
continue;
}
if ((result=hash_insert_ex(pHash, ext_name, \
strlen(ext_name)+1, content_type, \
strlen(content_type)+1, true)) < 0)
{
free(content);
result *= -1;
logError("file: "__FILE__", line: %d, " \
"hash_insert_ex fail, errno: %d, " \
"error info: %s", __LINE__, \
result, STRERROR(result));
return result;
}
}
}
free(content);
//hash_stat_print(pHash);
return 0;
}

View File

@ -0,0 +1,42 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#ifndef _MINE_FILE_PARSER_H
#define _MINE_FILE_PARSER_H
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include "fastcommon/hash.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
load mime types from file
params:
pHash: hash array to store the mime types,
key is the file extension name, eg. jpg
value is the content type, eg. image/jpeg
the hash array will be initialized in this function,
the hash array should be destroyed when you are done with it
mime_filename: the mime filename,
the file format is the same as apache's mime.types file
return: 0 for success, !=0 for fail
**/
int load_mime_types_from_file(HashArray *pHash, const char *mime_filename);
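/* Example (illustrative): a mime.types line such as
 *     image/jpeg    jpeg jpg jpe
 * produces three hash entries: "jpeg" -> "image/jpeg", "jpg" -> "image/jpeg"
 * and "jpe" -> "image/jpeg"; empty lines and lines starting with '#' are
 * ignored by the parser. */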
#ifdef __cplusplus
}
#endif
#endif

71
JCEC-fastdfs/conf/client.conf Executable file
View File

@ -0,0 +1,71 @@
# connect timeout in seconds
# default value is 30s
# Note: in the intranet network (LAN), 2 seconds is enough.
connect_timeout = 5
# network timeout in seconds
# default value is 30s
network_timeout = 60
# the base path to store log files
base_path = /home/yuqing/fastdfs
# tracker_server can occur more than once for multiple tracker servers.
# the value format of tracker_server is "HOST:PORT",
# the HOST can be hostname or ip address,
# and the HOST can be dual IPs or hostnames separated by a comma,
# the dual IPs must be an inner (intranet) IP and an outer (extranet) IP,
# or two different types of inner (intranet) IPs.
# for example: 192.168.2.100,122.244.141.46:22122
# another eg.: 192.168.1.10,172.17.4.21:22122
tracker_server = 192.168.0.196:22122
tracker_server = 192.168.0.197:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level = info
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose idle time exceeds this value will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker = false
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf
#HTTP settings
http.tracker_server_port = 80
#use "#include" directive to include HTTP other settiongs
##include http.conf

29
JCEC-fastdfs/conf/http.conf Executable file
View File

@ -0,0 +1,29 @@
# HTTP default content type
http.default_content_type = application/octet-stream
# MIME types mapping filename
# MIME types file format: MIME_type extensions
# such as: image/jpeg jpeg jpg jpe
# you can use apache's MIME file: mime.types
http.mime_types_filename = mime.types
# if use token to anti-steal
# default value is false (0)
http.anti_steal.check_token = false
# token TTL (time to live), seconds
# default value is 600
http.anti_steal.token_ttl = 900
# secret key to generate anti-steal token
# this parameter must be set when http.anti_steal.check_token is set to true
# the length of the secret key should not exceed 128 bytes
http.anti_steal.secret_key = FastDFS1234567890
# return the content of this file when the token check fails
# default value is empty (no file specified)
http.anti_steal.token_check_fail = /home/yuqing/fastdfs/conf/anti-steal.jpg
# if support multi regions for HTTP Range
# default value is true
http.multi_range.enabed = true

1065
JCEC-fastdfs/conf/mime.types Executable file

File diff suppressed because it is too large Load Diff

353
JCEC-fastdfs/conf/storage.conf Executable file
View File

@ -0,0 +1,353 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled = false
# the name of the group this storage server belongs to
#
# comment or remove this item for fetching from tracker server,
# in this case, use_storage_id must be set to true in tracker.conf,
# and storage_ids.conf must be configured correctly.
group_name = group1
# bind an address of this host
# empty for bind all addresses of this host
bind_addr =
# if bind an address of this host when connect to other servers
# (this storage server as a client)
# true for binding the address configured by the above parameter: "bind_addr"
# false for binding any address of this host
client_bind = true
# the storage server port
port = 23000
# connect timeout in seconds
# default value is 30
# Note: in the intranet network (LAN), 2 seconds is enough.
connect_timeout = 5
# network timeout in seconds for send and recv
# default value is 30
network_timeout = 60
# the heart beat interval in seconds
# the storage server send heartbeat to tracker server periodically
# default value is 30
heart_beat_interval = 30
# disk usage report interval in seconds
# the storage server send disk usage report to tracker server periodically
# default value is 300
stat_report_interval = 60
# the base path to store data and log files
# NOTE: the binlog files may be large, make sure
# the base path has enough disk space,
# eg. the free disk space should be > 50GB
base_path = /home/yuqing/fastdfs
# max concurrent connections the server supports,
# you should set this parameter larger, eg. 10240
# default value is 256
max_connections = 1024
# the buff size to recv / send data from/to network
# this parameter must be more than 8KB
# 256KB or 512KB is recommended
# default value is 64KB
# since V2.00
buff_size = 256KB
# accept thread count
# default value is 1 which is recommended
# since V4.07
accept_threads = 1
# work thread count
# work threads to deal network io
# default value is 4
# since V2.00
work_threads = 4
# if disk read / write separated
## false for mixed read and write
## true for separated read and write
# default value is true
# since V2.00
disk_rw_separated = true
# disk reader thread count per store path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_reader_threads = 1
# disk writer thread count per store path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_writer_threads = 1
# when no entry to sync, try read binlog again after X milliseconds
# must > 0, default value is 200ms
sync_wait_msec = 50
# after sync a file, usleep milliseconds
# 0 for sync successively (never call usleep)
sync_interval = 0
# storage sync start time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_start_time = 00:00
# storage sync end time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_end_time = 23:59
# write to the mark file after sync N files
# default value is 500
write_mark_file_freq = 500
# disk recovery thread count
# default value is 1
# since V6.04
disk_recovery_threads = 3
# store path (disk or mount point) count, default value is 1
store_path_count = 1
# store_path#, numbered from 0, configures the store paths for files
# if store_path0 does not exist, its value is base_path (NOT recommended)
# the paths must exist.
#
# IMPORTANT NOTE:
# the store paths' order is very important, don't mess up!!!
# the base_path should be independent (different) of the store paths
store_path0 = /home/yuqing/fastdfs
#store_path1 = /home/yuqing/fastdfs2
# subdir_count * subdir_count directories will be auto created under each
# store_path (disk), value can be 1 to 256, default value is 256
subdir_count_per_path = 256
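# illustrative note: with the default of 256, each store path gets
# 256 * 256 = 65536 data directories, named 00/00 through FF/FF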
# tracker_server can occur more than once for multiple tracker servers.
# the value format of tracker_server is "HOST:PORT",
# the HOST can be hostname or ip address,
# and the HOST can be dual IPs or hostnames separated by a comma,
# the dual IPs must be an inner (intranet) IP and an outer (extranet) IP,
# or two different types of inner (intranet) IPs.
# for example: 192.168.2.100,122.244.141.46:22122
# another eg.: 192.168.1.10,172.17.4.21:22122
tracker_server = 192.168.209.121:22122
tracker_server = 192.168.209.122:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level = info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group =
#unix username to run this program,
#not set (empty) means run by current user
run_by_user =
# allow_hosts can occur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts = *
# the mode of the files distributed to the data path
# 0: round robin (default)
# 1: random, distributed by hash code
file_distribute_path_mode = 0
# valid when file_distribute_path_mode is set to 0 (round robin).
# when the written file count reaches this number, then rotate to next path.
# rotate to the first path (00/00) after the last path (such as FF/FF).
# default value is 100
file_distribute_rotate_count = 100
# call fsync to disk when write big file
# 0: never call fsync
# other: call fsync when written bytes >= this bytes
# default value is 0 (never call fsync)
fsync_after_written_bytes = 0
# sync log buff to disk every interval seconds
# must > 0, default value is 10 seconds
sync_log_buff_interval = 1
# sync binlog buff / cache to disk every interval seconds
# default value is 60 seconds
sync_binlog_buff_interval = 1
# sync storage stat info to disk every interval seconds
# default value is 300 seconds
sync_stat_file_interval = 300
# thread stack size, should >= 512KB
# default value is 512KB
thread_stack_size = 512KB
# the priority as a source server for uploading file.
# the lower this value, the higher its uploading priority.
# default value is 10
upload_priority = 10
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multiple aliases separated by commas. empty value means auto set by OS type
# default value is empty
if_alias_prefix =
# if check file duplicate, when set to true, use FastDHT to store file indexes
# 1 or yes: need check
# 0 or no: do not check
# default value is 0
check_file_duplicate = 0
# file signature method for check file duplicate
## hash: four 32 bits hash code
## md5: MD5 signature
# default value is hash
# since V4.01
file_signature_method = hash
# namespace for storing file indexes (key-value pairs)
# this item must be set when check_file_duplicate is true / on
key_namespace = FastDFS
# set keep_alive to 1 to enable persistent connection with FastDHT servers
# default value is 0 (short connection)
keep_alive = 0
# you can use "#include filename" (not include double quotes) directive to
# load FastDHT server list, when the filename is a relative path such as
# pure filename, the base path is the base path of current/this config file.
# must set FastDHT server list when check_file_duplicate is true / on
# please see INSTALL of FastDHT for detail
##include /home/yuqing/fastdht/conf/fdht_servers.conf
# if log to access log
# default value is false
# since V4.00
use_access_log = false
# if rotate the access log every day
# default value is false
# since V4.00
rotate_access_log = false
# rotate access log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.00
access_log_rotate_time = 00:00
# if compress the old access log by gzip
# default value is false
# since V6.04
compress_old_access_log = false
# compress the access log days before
# default value is 1
# since V6.04
compress_access_log_days_before = 7
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time = 00:00
# if compress the old error log by gzip
# default value is false
# since V6.04
compress_old_error_log = false
# compress the error log days before
# default value is 1
# since V6.04
compress_error_log_days_before = 7
# rotate access log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_access_log_size = 0
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if skip the invalid record when sync file
# default value is false
# since V4.02
file_sync_skip_invalid_record = false
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = true
# connections whose idle time exceeds this value will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# if compress the binlog files by gzip
# default value is false
# since V6.01
compress_binlog = true
# try to compress binlog time, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 01:30
# since V6.01
compress_binlog_time = 01:30
# if check the mark of store path to prevent confusion
# recommend to set this parameter to true
# if two storage servers (instances) MUST use the same store path for
# some specific purposes, you should set this parameter to false
# default value is true
# since V6.03
check_store_path_mark = true
# use the ip address of this storage server if domain_name is empty,
# else this domain name will occur in the url redirected by the tracker server
http.domain_name =
# the port of the web server on this storage server
http.server_port = 8888

View File

@ -0,0 +1,16 @@
# <id> <group_name> <ip_or_hostname[:port]>
#
# id is a natural number (1, 2, 3 etc.),
# an id length of 6 digits is enough, such as 100001
#
# storage ip or hostname can be dual IPs separated by a comma,
# one is an inner (intranet) IP and another is an outer (extranet) IP,
# or two different types of inner (intranet) IPs
# for example: 192.168.2.100,122.244.141.46
# another eg.: 192.168.1.10,172.17.4.21
#
# the port is optional. if you run more than one storaged instance
# on a server, you must specify the port to distinguish different instances.
100001 group1 192.168.0.196
100002 group1 192.168.0.197

329
JCEC-fastdfs/conf/tracker.conf Executable file
View File

@ -0,0 +1,329 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled = false
# bind an address of this host
# empty for bind all addresses of this host
bind_addr =
# the tracker server port
port = 22122
# connect timeout in seconds
# default value is 30
# Note: in the intranet network (LAN), 2 seconds is enough.
connect_timeout = 5
# network timeout in seconds for send and recv
# default value is 30
network_timeout = 60
# the base path to store data and log files
base_path = /home/yuqing/fastdfs
# max concurrent connections this server supports
# you should set this parameter larger, eg. 10240
# default value is 256
max_connections = 1024
# accept thread count
# default value is 1 which is recommended
# since V4.07
accept_threads = 1
# work thread count
# work threads to deal network io
# default value is 4
# since V2.00
work_threads = 4
# the min network buff size
# default value 8KB
min_buff_size = 8KB
# the max network buff size
# default value 128KB
max_buff_size = 128KB
# the method for selecting group to upload files
# 0: round robin
# 1: specify group
# 2: load balance, select the max free space group to upload file
store_lookup = 2
# which group to upload file
# when store_lookup set to 1, must set store_group to the group name
store_group = group2
# which storage server to upload file
# 0: round robin (default)
# 1: the first server order by ip address
# 2: the first server order by priority (the minimal)
# Note: if use_trunk_file set to true, must set store_server to 1 or 2
store_server = 0
# which path (means disk or mount point) of the storage server to upload file
# 0: round robin
# 2: load balance, select the max free space path to upload file
store_path = 0
# which storage server to download file
# 0: round robin (default)
# 1: the source storage server which the current file uploaded to
download_server = 0
# reserved storage space for system or other applications.
# if the free (available) space of any storage server in
# a group <= reserved_storage_space, no file can be uploaded to this group.
# bytes unit can be one of follows:
### G or g for gigabyte(GB)
### M or m for megabyte(MB)
### K or k for kilobyte(KB)
### no unit for byte(B)
### XX.XX% as ratio such as: reserved_storage_space = 10%
reserved_storage_space = 20%
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level = info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user =
# allow_hosts can occur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts = *
# sync log buff to disk every interval seconds
# default value is 10 seconds
sync_log_buff_interval = 1
# check storage server alive interval seconds
check_active_interval = 120
# thread stack size, should >= 64KB
# default value is 256KB
thread_stack_size = 256KB
# auto adjust when the ip address of the storage server changed
# default value is true
storage_ip_changed_auto_adjust = true
# storage sync file max delay seconds
# default value is 86400 seconds (one day)
# since V2.00
storage_sync_file_max_delay = 86400
# the max time of storage sync a file
# default value is 300 seconds
# since V2.00
storage_sync_file_max_time = 300
# if use a trunk file to store several small files
# default value is false
# since V3.00
use_trunk_file = false
# the min slot size, should <= 4KB
# default value is 256 bytes
# since V3.00
slot_min_size = 256
# the max slot size, should > slot_min_size
# store the uploaded file to a trunk file when its size <= this value
# default value is 16MB
# since V3.00
slot_max_size = 1MB
# the alignment size to allocate the trunk space
# default value is 0 (never align)
# since V6.05
# NOTE: the larger the alignment size, the less likely disk
# fragmentation is, but the more space is wasted.
trunk_alloc_alignment_size = 256
# if merge contiguous free spaces of trunk file
# default value is false
# since V6.05
trunk_free_space_merge = true
# if delete / reclaim the unused trunk files
# default value is false
# since V6.05
delete_unused_trunk_files = false
# the trunk file size, should >= 4MB
# default value is 64MB
# since V3.00
trunk_file_size = 64MB
# if create trunk files in advance
# default value is false
# since V3.06
trunk_create_file_advance = false
# the time base to create trunk file
# the time format: HH:MM
# default value is 02:00
# since V3.06
trunk_create_file_time_base = 02:00
# the interval of create trunk file, unit: second
# default value is 86400 (one day)
# since V3.06
trunk_create_file_interval = 86400
# the threshold to create trunk file
# when the free trunk file size is less than the threshold,
# the trunk files will be created
# default value is 0
# since V3.06
trunk_create_file_space_threshold = 20G
# if check trunk space occupying when loading trunk free spaces
# the occupied spaces will be ignored
# default value is false
# since V3.09
# NOTICE: setting this parameter to true will slow the loading of trunk spaces
# at startup. you should set this parameter to true only when necessary.
trunk_init_check_occupying = false
# if ignore storage_trunk.dat, reload from trunk binlog
# default value is false
# since V3.10
# set to true once for version upgrade when your version less than V3.10
trunk_init_reload_from_binlog = false
# the min interval for compressing the trunk binlog file
# unit: second, 0 means never compress
# FastDFS compress the trunk binlog when trunk init and trunk destroy
# recommend to set this parameter to 86400 (one day)
# default value is 0
# since V5.01
trunk_compress_binlog_min_interval = 86400
# the interval for compressing the trunk binlog file
# unit: second, 0 means never compress
# recommend to set this parameter to 86400 (one day)
# default value is 0
# since V6.05
trunk_compress_binlog_interval = 86400
# compress the trunk binlog time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 03:00
# since V6.05
trunk_compress_binlog_time_base = 03:00
# max backups for the trunk binlog file
# default value is 0 (never backup)
# since V6.05
trunk_binlog_max_backups = 7
# if use storage server ID instead of IP address
# if you want to use dual IPs for storage server, you MUST set
# this parameter to true, and configure the dual IPs in the file
# configured by following item "storage_ids_filename", such as storage_ids.conf
# default value is false
# since V4.00
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# this parameter is valid only when use_storage_id set to true
# since V4.00
storage_ids_filename = storage_ids.conf
# id type of the storage server in the filename, values are:
## ip: the ip address of the storage server
## id: the server id of the storage server
# this parameter is valid only when use_storage_id is set to true
# default value is ip
# since V4.03
id_type_in_filename = id
# if store slave file use symbol link
# default value is false
# since V4.01
store_slave_file_use_link = false
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time = 00:00
# if compress the old error log by gzip
# default value is false
# since V6.04
compress_old_error_log = false
# compress the error log days before
# default value is 1
# since V6.04
compress_error_log_days_before = 7
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = true
# connections whose idle time exceeds this value will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# HTTP port on this tracker server
http.server_port = 8080
# check storage HTTP server alive interval seconds
# <= 0 for never check
# default value is 30
http.check_alive_interval = 30
# check storage HTTP server alive type, values are:
# tcp : connect to the storage server with HTTP port only,
# do not request and get response
# http: storage check alive url must return http status 200
# default value is tcp
http.check_alive_type = tcp
# check storage HTTP server alive uri/url
# NOTE: storage embed HTTP server support uri: /status.html
http.check_alive_uri = /status.html

View File

@ -0,0 +1,42 @@
# centos 7
FROM centos:7
# add configuration files
ADD conf/client.conf /etc/fdfs/
ADD conf/http.conf /etc/fdfs/
ADD conf/mime.types /etc/fdfs/
ADD conf/storage.conf /etc/fdfs/
ADD conf/tracker.conf /etc/fdfs/
ADD fastdfs.sh /home
ADD conf/nginx.conf /etc/fdfs/
ADD conf/mod_fastdfs.conf /etc/fdfs
# add source archives
ADD source/libfastcommon.tar.gz /usr/local/src/
ADD source/fastdfs.tar.gz /usr/local/src/
ADD source/fastdfs-nginx-module.tar.gz /usr/local/src/
ADD source/nginx-1.15.4.tar.gz /usr/local/src/
# Run
RUN yum install git gcc gcc-c++ make automake autoconf libtool pcre pcre-devel zlib zlib-devel openssl-devel wget vim -y \
&& mkdir /home/dfs \
&& cd /usr/local/src/ \
&& cd libfastcommon/ \
&& ./make.sh && ./make.sh install \
&& cd ../ \
&& cd fastdfs/ \
&& ./make.sh && ./make.sh install \
&& cd ../ \
&& cd nginx-1.15.4/ \
&& ./configure --add-module=/usr/local/src/fastdfs-nginx-module/src/ \
&& make && make install \
&& chmod +x /home/fastdfs.sh
# export config
VOLUME /etc/fdfs
EXPOSE 22122 23000 8888 80
ENTRYPOINT ["/home/fastdfs.sh"]

View File

@ -0,0 +1,49 @@
# FastDFS Dockerfile local (local version)
## Statement
There is nothing really new here: this tutorial only makes a few changes on top of the work of huayanYu (小锅盖) and the Wiki authors, so in essence it is still their tutorial.
## Directory overview
### conf
The configuration files needed by the Dockerfile.
You can of course modify these files, for example base_path and related settings in storage.conf.
### source
The packages FastDFS needs that have to be downloaded from the Internet (including FastDFS itself). Because of unreliable network access in mainland China, building the image kept failing, so the packages were simply downloaded in advance.
## Usage
Note that you need to specify the host IP when running the container, using the FASTDFS_IPADDR parameter.
Below is a sample docker run command:
```
docker run -d -e FASTDFS_IPADDR=192.168.1.234 -p 8888:8888 -p 22122:22122 -p 23000:23000 -p 8011:80 --name test-fast <image id or name>
```
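The image name or ID at the end of the `docker run` command is whatever tag you gave the image when you built it. A minimal build command might look like the following sketch (the `fastdfs-local` tag is only an example, and it assumes the command is run from this directory, next to the Dockerfile):
```
docker build -t fastdfs-local .
```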
## Epilogue
Essentially, the local version is no different from the network version.

View File

@ -0,0 +1,62 @@
# connect timeout in seconds
# default value is 30s
connect_timeout=30
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store log files
base_path=/home/dfs
# tracker_server can occur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=com.ikingtech.ch116221:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose idle time exceeds this value will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker=false
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf
#HTTP settings
http.tracker_server_port=80
#use "#include" directive to include HTTP other settiongs
##include http.conf

View File

@ -0,0 +1,29 @@
# HTTP default content type
http.default_content_type = application/octet-stream
# MIME types mapping filename
# MIME types file format: MIME_type extensions
# such as: image/jpeg jpeg jpg jpe
# you can use apache's MIME file: mime.types
http.mime_types_filename=mime.types
# if use token to anti-steal
# default value is false (0)
http.anti_steal.check_token=false
# token TTL (time to live), seconds
# default value is 600
http.anti_steal.token_ttl=900
# secret key to generate anti-steal token
# this parameter must be set when http.anti_steal.check_token is set to true
# the length of the secret key should not exceed 128 bytes
http.anti_steal.secret_key=FastDFS1234567890
# return the content of this file when the token check fails
# default value is empty (no file specified)
http.anti_steal.token_check_fail=/home/yuqing/fastdfs/conf/anti-steal.jpg
# if support multi regions for HTTP Range
# default value is true
http.multi_range.enabed = true

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,133 @@
# connect timeout in seconds
# default value is 30s
connect_timeout=2
# network recv and send timeout in seconds
# default value is 30s
network_timeout=30
# the base path to store log files
base_path=/tmp
# if load FastDFS parameters from tracker server
# since V1.12
# default value is false
load_fdfs_parameters_from_tracker=true
# storage sync file max delay seconds
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.12
# default value is 86400 seconds (one day)
storage_sync_file_max_delay = 86400
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V1.13
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.13
storage_ids_filename = storage_ids.conf
# FastDFS tracker_server can occur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
# valid only when load_fdfs_parameters_from_tracker is true
tracker_server=com.ikingtech.ch116221:22122
# the port of the local storage server
# the default value is 23000
storage_server_port=23000
# the group name of the local storage server
group_name=group1
# if the url / uri including the group name
# set to false when uri like /M00/00/00/xxx
# set to true when uri like ${group_name}/M00/00/00/xxx, such as group1/M00/xxx
# default value is false
url_have_group_name = true
# path(disk or mount point) count, default value is 1
# must same as storage.conf
store_path_count=1
# store_path#, numbered from 0; if store_path0 does not exist, its value is base_path
# the paths must exist
# must same as storage.conf
store_path0=/home/dfs
#store_path1=/home/yuqing/fastdfs1
# standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# set the log filename, such as /usr/local/apache2/logs/mod_fastdfs.log
# empty for output to stderr (apache and nginx error_log file)
log_filename=
# response mode when the file does not exist in the local file system
## proxy: get the content from other storage server, then send to client
## redirect: redirect to the original storage server (HTTP Header is Location)
response_mode=proxy
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multiple aliases separated by commas. empty value means auto set by OS type
# this parameter is used to get all ip addresses of the local host
# default value is empty
if_alias_prefix=
# use "#include" directive to include HTTP config file
# NOTE: #include is an include directive, do NOT remove the # before include
#include http.conf
# if support flv
# default value is false
# since v1.15
flv_support = true
# flv file extension name
# default value is flv
# since v1.15
flv_extension = flv
# set the group count
# set to non-zero to support multi-group on this storage server
# set to 0 for single group only
# groups settings section as [group1], [group2], ..., [groupN]
# default value is 0
# since v1.14
group_count = 0
# group settings for group #1
# since v1.14
# to support multi-group on this storage server, uncomment the following section
#[group1]
#group_name=group1
#storage_server_port=23000
#store_path_count=2
#store_path0=/home/yuqing/fastdfs
#store_path1=/home/yuqing/fastdfs1
# group settings for group #2
# since v1.14
# to support multi-group, uncomment the following section as necessary
#[group2]
#group_name=group2
#storage_server_port=23000
#store_path_count=1
#store_path0=/home/yuqing/fastdfs

View File

@ -0,0 +1,127 @@
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root html;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
server {
listen 8888;
server_name localhost;
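# illustrative note: with url_have_group_name = true in mod_fastdfs.conf,
# URLs such as /group1/M00/00/00/xxx.jpg match the location below and are
# served by ngx_fastdfs_module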
location ~/group[0-9]/ {
ngx_fastdfs_module;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}

View File

@ -0,0 +1,287 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled=false
# the name of the group this storage server belongs to
#
# comment or remove this item for fetching from tracker server,
# in this case, use_storage_id must be set to true in tracker.conf,
# and storage_ids.conf must be configured correctly.
group_name=group1
# bind an address of this host
# empty for bind all addresses of this host
bind_addr=
# if bind an address of this host when connect to other servers
# (this storage server as a client)
# true for binding the address configured by the above parameter: "bind_addr"
# false for binding any address of this host
client_bind=true
# the storage server port
port=23000
# connect timeout in seconds
# default value is 30s
connect_timeout=10
# network timeout in seconds
# default value is 30s
network_timeout=60
# heart beat interval in seconds
heart_beat_interval=30
# disk usage report interval in seconds
stat_report_interval=60
# the base path to store data and log files
base_path=/home/dfs
# max concurrent connections the server supports
# default value is 256
# more max_connections means more memory will be used
# you should set this parameter larger, eg. 10240
max_connections=1024
# the buff size to recv / send data
# this parameter must be more than 8KB
# default value is 64KB
# since V2.00
buff_size = 256KB
# accept thread count
# default value is 1
# since V4.07
accept_threads=1
# work thread count, should <= max_connections
# work threads deal with network io
# default value is 4
# since V2.00
work_threads=4
# if disk read / write separated
## false for mixed read and write
## true for separated read and write
# default value is true
# since V2.00
disk_rw_separated = true
# disk reader thread count per store base path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_reader_threads = 1
# disk writer thread count per store base path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_writer_threads = 1
# when no entry to sync, try read binlog again after X milliseconds
# must > 0, default value is 200ms
sync_wait_msec=50
# after sync a file, usleep milliseconds
# 0 for sync successively (never call usleep)
sync_interval=0
# storage sync start time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_start_time=00:00
# storage sync end time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_end_time=23:59
# write to the mark file after sync N files
# default value is 500
write_mark_file_freq=500
# path(disk or mount point) count, default value is 1
store_path_count=1
# store_path#, numbered from 0; if store_path0 does not exist, its value is base_path
# the paths must exist
store_path0=/home/dfs
#store_path1=/home/dfs2
# subdir_count * subdir_count directories will be auto created under each
# store_path (disk), value can be 1 to 256, default value is 256
subdir_count_per_path=256
# tracker_server can occur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=com.ikingtech.ch116221:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user=
# allow_hosts can occur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts=*
# the mode of the files distributed to the data path
# 0: round robin(default)
# 1: random, distributed by hash code
file_distribute_path_mode=0
# valid when file_distribute_path_mode is set to 0 (round robin),
# when the written file count reaches this number, then rotate to next path
# default value is 100
file_distribute_rotate_count=100
# call fsync to disk when write big file
# 0: never call fsync
# other: call fsync when written bytes >= this bytes
# default value is 0 (never call fsync)
fsync_after_written_bytes=0
# sync log buff to disk every interval seconds
# must > 0, default value is 10 seconds
sync_log_buff_interval=10
# sync binlog buff / cache to disk every interval seconds
# default value is 60 seconds
sync_binlog_buff_interval=10
# sync storage stat info to disk every interval seconds
# default value is 300 seconds
sync_stat_file_interval=300
# thread stack size, should >= 512KB
# default value is 512KB
thread_stack_size=512KB
# the priority as a source server for uploading file.
# the lower this value, the higher its uploading priority.
# default value is 10
upload_priority=10
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multiple aliases separated by commas. empty value means auto set by OS type
# default value is empty
if_alias_prefix=
# if check file duplicate, when set to true, use FastDHT to store file indexes
# 1 or yes: need check
# 0 or no: do not check
# default value is 0
check_file_duplicate=0
# file signature method for check file duplicate
## hash: four 32 bits hash code
## md5: MD5 signature
# default value is hash
# since V4.01
file_signature_method=hash
# namespace for storing file indexes (key-value pairs)
# this item must be set when check_file_duplicate is true / on
key_namespace=FastDFS
# set keep_alive to 1 to enable persistent connection with FastDHT servers
# default value is 0 (short connection)
keep_alive=0
# you can use "#include filename" (not include double quotes) directive to
# load FastDHT server list, when the filename is a relative path such as
# pure filename, the base path is the base path of current/this config file.
# must set FastDHT server list when check_file_duplicate is true / on
# please see INSTALL of FastDHT for detail
##include /home/yuqing/fastdht/conf/fdht_servers.conf
# if log to access log
# default value is false
# since V4.00
use_access_log = false
# if rotate the access log every day
# default value is false
# since V4.00
rotate_access_log = false
# rotate access log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.00
access_log_rotate_time=00:00
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time=00:00
# rotate access log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_access_log_size = 0
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if skip the invalid record when sync file
# default value is false
# since V4.02
file_sync_skip_invalid_record=false
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# use the ip address of this storage server if domain_name is empty,
# else this domain name will ocur in the url redirected by the tracker server
http.domain_name=
# the port of the web server on this storage server
http.server_port=8888

View File

@ -0,0 +1,278 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled=false
# bind an address of this host
# empty for bind all addresses of this host
bind_addr=
# the tracker server port
port=22122
# connect timeout in seconds
# default value is 30s
connect_timeout=10
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store data and log files
base_path=/home/dfs
# max concurrent connections this server supported
# you should set this parameter larger, eg. 102400
max_connections=1024
# accept thread count
# default value is 1
# since V4.07
accept_threads=1
# work thread count, should <= max_connections
# default value is 4
# since V2.00
work_threads=4
# min buff size
# default value 8KB
min_buff_size = 8KB
# max buff size
# default value 128KB
max_buff_size = 128KB
# the method of selecting group to upload files
# 0: round robin
# 1: specify group
# 2: load balance, select the max free space group to upload file
store_lookup=2
# which group to upload file
# when store_lookup set to 1, must set store_group to the group name
store_group=group2
# which storage server to upload file
# 0: round robin (default)
# 1: the first server order by ip address
# 2: the first server order by priority (the minimal)
# Note: if use_trunk_file set to true, must set store_server to 1 or 2
store_server=0
# which path(means disk or mount point) of the storage server to upload file
# 0: round robin
# 2: load balance, select the max free space path to upload file
store_path=0
# which storage server to download file
# 0: round robin (default)
# 1: the source storage server which the current file uploaded to
download_server=0
# reserved storage space for system or other applications.
# if the free(available) space of any stoarge server in
# a group <= reserved_storage_space,
# no file can be uploaded to this group.
# bytes unit can be one of follows:
### G or g for gigabyte(GB)
### M or m for megabyte(MB)
### K or k for kilobyte(KB)
### no unit for byte(B)
### XX.XX% as ratio such as reserved_storage_space = 10%
reserved_storage_space = 1%
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user=
# allow_hosts can ocur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts=*
# sync log buff to disk every interval seconds
# default value is 10 seconds
sync_log_buff_interval = 10
# check storage server alive interval seconds
check_active_interval = 120
# thread stack size, should >= 64KB
# default value is 64KB
thread_stack_size = 64KB
# auto adjust when the ip address of the storage server changed
# default value is true
storage_ip_changed_auto_adjust = true
# storage sync file max delay seconds
# default value is 86400 seconds (one day)
# since V2.00
storage_sync_file_max_delay = 86400
# the max time of storage sync a file
# default value is 300 seconds
# since V2.00
storage_sync_file_max_time = 300
# if use a trunk file to store several small files
# default value is false
# since V3.00
use_trunk_file = false
# the min slot size, should <= 4KB
# default value is 256 bytes
# since V3.00
slot_min_size = 256
# the max slot size, should > slot_min_size
# store the upload file to trunk file when it's size <= this value
# default value is 16MB
# since V3.00
slot_max_size = 16MB
# the trunk file size, should >= 4MB
# default value is 64MB
# since V3.00
trunk_file_size = 64MB
# if create trunk file advancely
# default value is false
# since V3.06
trunk_create_file_advance = false
# the time base to create trunk file
# the time format: HH:MM
# default value is 02:00
# since V3.06
trunk_create_file_time_base = 02:00
# the interval of create trunk file, unit: second
# default value is 38400 (one day)
# since V3.06
trunk_create_file_interval = 86400
# the threshold to create trunk file
# when the free trunk file size less than the threshold, will create
# the trunk files
# default value is 0
# since V3.06
trunk_create_file_space_threshold = 20G
# if check trunk space occupying when loading trunk free spaces
# the occupied spaces will be ignored
# default value is false
# since V3.09
# NOTICE: set this parameter to true will slow the loading of trunk spaces
# when startup. you should set this parameter to true when neccessary.
trunk_init_check_occupying = false
# if ignore storage_trunk.dat, reload from trunk binlog
# default value is false
# since V3.10
# set to true once for version upgrade when your version less than V3.10
trunk_init_reload_from_binlog = false
# the min interval for compressing the trunk binlog file
# unit: second
# default value is 0, 0 means never compress
# FastDFS compress the trunk binlog when trunk init and trunk destroy
# recommand to set this parameter to 86400 (one day)
# since V5.01
trunk_compress_binlog_min_interval = 0
# if use storage ID instead of IP address
# default value is false
# since V4.00
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# since V4.00
storage_ids_filename = storage_ids.conf
# id type of the storage server in the filename, values are:
## ip: the ip address of the storage server
## id: the server id of the storage server
# this paramter is valid only when use_storage_id set to true
# default value is ip
# since V4.03
id_type_in_filename = ip
# if store slave file use symbol link
# default value is false
# since V4.01
store_slave_file_use_link = false
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time=00:00
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# HTTP port on this tracker server
http.server_port=8080
# check storage HTTP server alive interval seconds
# <= 0 for never check
# default value is 30
http.check_alive_interval=30
# check storage HTTP server alive type, values are:
# tcp : connect to the storge server with HTTP port only,
# do not request and get response
# http: storage check alive url must return http status 200
# default value is tcp
http.check_alive_type=tcp
# check storage HTTP server alive uri/url
# NOTE: storage embed HTTP server support uri: /status.html
http.check_alive_uri=/status.html

View File

@ -0,0 +1,26 @@
#!/bin/bash
new_val=$FASTDFS_IPADDR
old="com.ikingtech.ch116221"
sed -i "s/$old/$new_val/g" /etc/fdfs/client.conf
sed -i "s/$old/$new_val/g" /etc/fdfs/storage.conf
sed -i "s/$old/$new_val/g" /etc/fdfs/mod_fastdfs.conf
cat /etc/fdfs/client.conf > /etc/fdfs/client.txt
cat /etc/fdfs/storage.conf > /etc/fdfs/storage.txt
cat /etc/fdfs/mod_fastdfs.conf > /etc/fdfs/mod_fastdfs.txt
mv /usr/local/nginx/conf/nginx.conf /usr/local/nginx/conf/nginx.conf.t
cp /etc/fdfs/nginx.conf /usr/local/nginx/conf
echo "start trackerd"
/etc/init.d/fdfs_trackerd start
echo "start storage"
/etc/init.d/fdfs_storaged start
echo "start nginx"
/usr/local/nginx/sbin/nginx
tail -f /dev/null

View File

@ -0,0 +1,39 @@
# centos 7
FROM centos:7
# 添加配置文件
ADD conf/client.conf /etc/fdfs/
ADD conf/http.conf /etc/fdfs/
ADD conf/mime.types /etc/fdfs/
ADD conf/storage.conf /etc/fdfs/
ADD conf/tracker.conf /etc/fdfs/
ADD fastdfs.sh /home
ADD conf/nginx.conf /etc/fdfs/
ADD conf/mod_fastdfs.conf /etc/fdfs
# run
RUN yum install git gcc gcc-c++ make automake autoconf libtool pcre pcre-devel zlib zlib-devel openssl-devel wget vim -y \
&& cd /usr/local/src \
&& git clone https://github.com/happyfish100/libfastcommon.git --depth 1 \
&& git clone https://github.com/happyfish100/fastdfs.git --depth 1 \
&& git clone https://github.com/happyfish100/fastdfs-nginx-module.git --depth 1 \
&& wget http://nginx.org/download/nginx-1.15.4.tar.gz \
&& tar -zxvf nginx-1.15.4.tar.gz \
&& mkdir /home/dfs \
&& cd /usr/local/src/ \
&& cd libfastcommon/ \
&& ./make.sh && ./make.sh install \
&& cd ../ \
&& cd fastdfs/ \
&& ./make.sh && ./make.sh install \
&& cd ../ \
&& cd nginx-1.15.4/ \
&& ./configure --add-module=/usr/local/src/fastdfs-nginx-module/src/ \
&& make && make install \
&& chmod +x /home/fastdfs.sh
# export config
VOLUME /etc/fdfs
EXPOSE 22122 23000 8888 80
ENTRYPOINT ["/home/fastdfs.sh"]

View File

@ -0,0 +1,45 @@
# FastDFS Dockerfile network (网络版本)
## 声明
其实并没什么区别 教程是在上一位huayanYu(小锅盖)和 Wiki的作者 的基础上进行了一些修改本质上还是huayanYu(小锅盖) 和 Wiki 上的作者写的教程
## 目录介绍
### conf
Dockerfile 所需要的一些配置文件
当然你也可以对这些文件进行一些修改 比如 storage.conf 里面的 bast_path 等相关
## 使用方法
需要注意的是 你需要在运行容器的时候制定宿主机的ip 用参数 FASTDFS_IPADDR 来指定
```
docker run -d -e FASTDFS_IPADDR=192.168.1.234 -p 8888:8888 -p 22122:22122 -p 23000:23000 -p 8011:80 --name test-fast 镜像id/镜像名称
```
## 后记
本质上 local 版本与 network 版本无区别
## Statement
In fact, there is no difference between the tutorials written by Huayan Yu and Wiki on the basis of their previous authors. In essence, they are also tutorials written by the authors of Huayan Yu and Wiki.
## Catalogue introduction
### conf
Dockerfile Some configuration files needed
Of course, you can also make some modifications to these files, such as bast_path in storage. conf, etc.
## Usage method
Note that you need to specify the host IP when running the container with the parameter FASTDFS_IPADDR
Here's a sample docker run instruction
```
docker run -d -e FASTDFS_IPADDR=192.168.1.234 -p 8888:8888 -p 22122:22122 -p 23000:23000 -p 8011:80 --name test-fast 镜像id/镜像名称
```
## Epilogue
Essentially, there is no difference between the local version and the network version.

View File

@ -0,0 +1,62 @@
# connect timeout in seconds
# default value is 30s
connect_timeout=30
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store log files
base_path=/home/dfs
# tracker_server can ocur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=com.ikingtech.ch116221:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# if load FastDFS parameters from tracker server
# since V4.05
# default value is false
load_fdfs_parameters_from_tracker=false
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V4.05
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V4.05
storage_ids_filename = storage_ids.conf
#HTTP settings
http.tracker_server_port=80
#use "#include" directive to include HTTP other settiongs
##include http.conf

View File

@ -0,0 +1,29 @@
# HTTP default content type
http.default_content_type = application/octet-stream
# MIME types mapping filename
# MIME types file format: MIME_type extensions
# such as: image/jpeg jpeg jpg jpe
# you can use apache's MIME file: mime.types
http.mime_types_filename=mime.types
# if use token to anti-steal
# default value is false (0)
http.anti_steal.check_token=false
# token TTL (time to live), seconds
# default value is 600
http.anti_steal.token_ttl=900
# secret key to generate anti-steal token
# this parameter must be set when http.anti_steal.check_token set to true
# the length of the secret key should not exceed 128 bytes
http.anti_steal.secret_key=FastDFS1234567890
# return the content of the file when check token fail
# default value is empty (no file sepecified)
http.anti_steal.token_check_fail=/home/yuqing/fastdfs/conf/anti-steal.jpg
# if support multi regions for HTTP Range
# default value is true
http.multi_range.enabed = true

File diff suppressed because it is too large Load Diff

View File

@ -0,0 +1,133 @@
# connect timeout in seconds
# default value is 30s
connect_timeout=2
# network recv and send timeout in seconds
# default value is 30s
network_timeout=30
# the base path to store log files
base_path=/tmp
# if load FastDFS parameters from tracker server
# since V1.12
# default value is false
load_fdfs_parameters_from_tracker=true
# storage sync file max delay seconds
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.12
# default value is 86400 seconds (one day)
storage_sync_file_max_delay = 86400
# if use storage ID instead of IP address
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# default value is false
# since V1.13
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# same as tracker.conf
# valid only when load_fdfs_parameters_from_tracker is false
# since V1.13
storage_ids_filename = storage_ids.conf
# FastDFS tracker_server can ocur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
# valid only when load_fdfs_parameters_from_tracker is true
tracker_server=com.ikingtech.ch116221:22122
# the port of the local storage server
# the default value is 23000
storage_server_port=23000
# the group name of the local storage server
group_name=group1
# if the url / uri including the group name
# set to false when uri like /M00/00/00/xxx
# set to true when uri like ${group_name}/M00/00/00/xxx, such as group1/M00/xxx
# default value is false
url_have_group_name = true
# path(disk or mount point) count, default value is 1
# must same as storage.conf
store_path_count=1
# store_path#, based 0, if store_path0 not exists, it's value is base_path
# the paths must be exist
# must same as storage.conf
store_path0=/home/dfs
#store_path1=/home/yuqing/fastdfs1
# standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
# set the log filename, such as /usr/local/apache2/logs/mod_fastdfs.log
# empty for output to stderr (apache and nginx error_log file)
log_filename=
# response mode when the file not exist in the local file system
## proxy: get the content from other storage server, then send to client
## redirect: redirect to the original storage server (HTTP Header is Location)
response_mode=proxy
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multi aliases split by comma. empty value means auto set by OS type
# this paramter used to get all ip address of the local host
# default values is empty
if_alias_prefix=
# use "#include" directive to include HTTP config file
# NOTE: #include is an include directive, do NOT remove the # before include
#include http.conf
# if support flv
# default value is false
# since v1.15
flv_support = true
# flv file extension name
# default value is flv
# since v1.15
flv_extension = flv
# set the group count
# set to none zero to support multi-group on this storage server
# set to 0 for single group only
# groups settings section as [group1], [group2], ..., [groupN]
# default value is 0
# since v1.14
group_count = 0
# group settings for group #1
# since v1.14
# when support multi-group on this storage server, uncomment following section
#[group1]
#group_name=group1
#storage_server_port=23000
#store_path_count=2
#store_path0=/home/yuqing/fastdfs
#store_path1=/home/yuqing/fastdfs1
# group settings for group #2
# since v1.14
# when support multi-group, uncomment following section as neccessary
#[group2]
#group_name=group2
#storage_server_port=23000
#store_path_count=1
#store_path0=/home/yuqing/fastdfs

View File

@ -0,0 +1,127 @@
#user nobody;
worker_processes 1;
#error_log logs/error.log;
#error_log logs/error.log notice;
#error_log logs/error.log info;
#pid logs/nginx.pid;
events {
worker_connections 1024;
}
http {
include mime.types;
default_type application/octet-stream;
#log_format main '$remote_addr - $remote_user [$time_local] "$request" '
# '$status $body_bytes_sent "$http_referer" '
# '"$http_user_agent" "$http_x_forwarded_for"';
#access_log logs/access.log main;
sendfile on;
#tcp_nopush on;
#keepalive_timeout 0;
keepalive_timeout 65;
#gzip on;
server {
listen 80;
server_name localhost;
#charset koi8-r;
#access_log logs/host.access.log main;
location / {
root html;
index index.html index.htm;
}
#error_page 404 /404.html;
# redirect server error pages to the static page /50x.html
#
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
# proxy the PHP scripts to Apache listening on 127.0.0.1:80
#
#location ~ \.php$ {
# proxy_pass http://127.0.0.1;
#}
# pass the PHP scripts to FastCGI server listening on 127.0.0.1:9000
#
#location ~ \.php$ {
# root html;
# fastcgi_pass 127.0.0.1:9000;
# fastcgi_index index.php;
# fastcgi_param SCRIPT_FILENAME /scripts$fastcgi_script_name;
# include fastcgi_params;
#}
# deny access to .htaccess files, if Apache's document root
# concurs with nginx's one
#
#location ~ /\.ht {
# deny all;
#}
}
server {
listen 8888;
server_name localhost;
location ~/group[0-9]/ {
ngx_fastdfs_module;
}
error_page 500 502 503 504 /50x.html;
location = /50x.html {
root html;
}
}
# another virtual host using mix of IP-, name-, and port-based configuration
#
#server {
# listen 8000;
# listen somename:8080;
# server_name somename alias another.alias;
# location / {
# root html;
# index index.html index.htm;
# }
#}
# HTTPS server
#
#server {
# listen 443 ssl;
# server_name localhost;
# ssl_certificate cert.pem;
# ssl_certificate_key cert.key;
# ssl_session_cache shared:SSL:1m;
# ssl_session_timeout 5m;
# ssl_ciphers HIGH:!aNULL:!MD5;
# ssl_prefer_server_ciphers on;
# location / {
# root html;
# index index.html index.htm;
# }
#}
}

View File

@ -0,0 +1,287 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled=false
# the name of the group this storage server belongs to
#
# comment or remove this item for fetching from tracker server,
# in this case, use_storage_id must set to true in tracker.conf,
# and storage_ids.conf must be configed correctly.
group_name=group1
# bind an address of this host
# empty for bind all addresses of this host
bind_addr=
# if bind an address of this host when connect to other servers
# (this storage server as a client)
# true for binding the address configed by above parameter: "bind_addr"
# false for binding any address of this host
client_bind=true
# the storage server port
port=23000
# connect timeout in seconds
# default value is 30s
connect_timeout=10
# network timeout in seconds
# default value is 30s
network_timeout=60
# heart beat interval in seconds
heart_beat_interval=30
# disk usage report interval in seconds
stat_report_interval=60
# the base path to store data and log files
base_path=/home/dfs
# max concurrent connections the server supported
# default value is 256
# more max_connections means more memory will be used
# you should set this parameter larger, eg. 10240
max_connections=1024
# the buff size to recv / send data
# this parameter must more than 8KB
# default value is 64KB
# since V2.00
buff_size = 256KB
# accept thread count
# default value is 1
# since V4.07
accept_threads=1
# work thread count, should <= max_connections
# work thread deal network io
# default value is 4
# since V2.00
work_threads=4
# if disk read / write separated
## false for mixed read and write
## true for separated read and write
# default value is true
# since V2.00
disk_rw_separated = true
# disk reader thread count per store base path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_reader_threads = 1
# disk writer thread count per store base path
# for mixed read / write, this parameter can be 0
# default value is 1
# since V2.00
disk_writer_threads = 1
# when no entry to sync, try read binlog again after X milliseconds
# must > 0, default value is 200ms
sync_wait_msec=50
# after sync a file, usleep milliseconds
# 0 for sync successively (never call usleep)
sync_interval=0
# storage sync start time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_start_time=00:00
# storage sync end time of a day, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
sync_end_time=23:59
# write to the mark file after sync N files
# default value is 500
write_mark_file_freq=500
# path(disk or mount point) count, default value is 1
store_path_count=1
# store_path#, based 0, if store_path0 not exists, it's value is base_path
# the paths must be exist
store_path0=/home/dfs
#store_path1=/home/dfs2
# subdir_count * subdir_count directories will be auto created under each
# store_path (disk), value can be 1 to 256, default value is 256
subdir_count_per_path=256
# tracker_server can ocur more than once, and tracker_server format is
# "host:port", host can be hostname or ip address
tracker_server=com.ikingtech.ch116221:22122
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user=
# allow_hosts can ocur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts=*
# the mode of the files distributed to the data path
# 0: round robin(default)
# 1: random, distributted by hash code
file_distribute_path_mode=0
# valid when file_distribute_to_path is set to 0 (round robin),
# when the written file count reaches this number, then rotate to next path
# default value is 100
file_distribute_rotate_count=100
# call fsync to disk when write big file
# 0: never call fsync
# other: call fsync when written bytes >= this bytes
# default value is 0 (never call fsync)
fsync_after_written_bytes=0
# sync log buff to disk every interval seconds
# must > 0, default value is 10 seconds
sync_log_buff_interval=10
# sync binlog buff / cache to disk every interval seconds
# default value is 60 seconds
sync_binlog_buff_interval=10
# sync storage stat info to disk every interval seconds
# default value is 300 seconds
sync_stat_file_interval=300
# thread stack size, should >= 512KB
# default value is 512KB
thread_stack_size=512KB
# the priority as a source server for uploading file.
# the lower this value, the higher its uploading priority.
# default value is 10
upload_priority=10
# the NIC alias prefix, such as eth in Linux, you can see it by ifconfig -a
# multi aliases split by comma. empty value means auto set by OS type
# default values is empty
if_alias_prefix=
# if check file duplicate, when set to true, use FastDHT to store file indexes
# 1 or yes: need check
# 0 or no: do not check
# default value is 0
check_file_duplicate=0
# file signature method for check file duplicate
## hash: four 32 bits hash code
## md5: MD5 signature
# default value is hash
# since V4.01
file_signature_method=hash
# namespace for storing file indexes (key-value pairs)
# this item must be set when check_file_duplicate is true / on
key_namespace=FastDFS
# set keep_alive to 1 to enable persistent connection with FastDHT servers
# default value is 0 (short connection)
keep_alive=0
# you can use "#include filename" (not include double quotes) directive to
# load FastDHT server list, when the filename is a relative path such as
# pure filename, the base path is the base path of current/this config file.
# must set FastDHT server list when check_file_duplicate is true / on
# please see INSTALL of FastDHT for detail
##include /home/yuqing/fastdht/conf/fdht_servers.conf
# if log to access log
# default value is false
# since V4.00
use_access_log = false
# if rotate the access log every day
# default value is false
# since V4.00
rotate_access_log = false
# rotate access log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.00
access_log_rotate_time=00:00
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time=00:00
# rotate access log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_access_log_size = 0
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if skip the invalid record when sync file
# default value is false
# since V4.02
file_sync_skip_invalid_record=false
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# use the ip address of this storage server if domain_name is empty,
# else this domain name will ocur in the url redirected by the tracker server
http.domain_name=
# the port of the web server on this storage server
http.server_port=8888

View File

@ -0,0 +1,278 @@
# is this config file disabled
# false for enabled
# true for disabled
disabled=false
# bind an address of this host
# empty for bind all addresses of this host
bind_addr=
# the tracker server port
port=22122
# connect timeout in seconds
# default value is 30s
connect_timeout=10
# network timeout in seconds
# default value is 30s
network_timeout=60
# the base path to store data and log files
base_path=/home/dfs
# max concurrent connections this server supported
# you should set this parameter larger, eg. 102400
max_connections=1024
# accept thread count
# default value is 1
# since V4.07
accept_threads=1
# work thread count, should <= max_connections
# default value is 4
# since V2.00
work_threads=4
# min buff size
# default value 8KB
min_buff_size = 8KB
# max buff size
# default value 128KB
max_buff_size = 128KB
# the method of selecting group to upload files
# 0: round robin
# 1: specify group
# 2: load balance, select the max free space group to upload file
store_lookup=2
# which group to upload file
# when store_lookup set to 1, must set store_group to the group name
store_group=group2
# which storage server to upload file
# 0: round robin (default)
# 1: the first server order by ip address
# 2: the first server order by priority (the minimal)
# Note: if use_trunk_file set to true, must set store_server to 1 or 2
store_server=0
# which path(means disk or mount point) of the storage server to upload file
# 0: round robin
# 2: load balance, select the max free space path to upload file
store_path=0
# which storage server to download file
# 0: round robin (default)
# 1: the source storage server which the current file uploaded to
download_server=0
# reserved storage space for system or other applications.
# if the free(available) space of any stoarge server in
# a group <= reserved_storage_space,
# no file can be uploaded to this group.
# bytes unit can be one of follows:
### G or g for gigabyte(GB)
### M or m for megabyte(MB)
### K or k for kilobyte(KB)
### no unit for byte(B)
### XX.XX% as ratio such as reserved_storage_space = 10%
reserved_storage_space = 1%
#standard log level as syslog, case insensitive, value list:
### emerg for emergency
### alert
### crit for critical
### error
### warn for warning
### notice
### info
### debug
log_level=info
#unix group name to run this program,
#not set (empty) means run by the group of current user
run_by_group=
#unix username to run this program,
#not set (empty) means run by current user
run_by_user=
# allow_hosts can ocur more than once, host can be hostname or ip address,
# "*" (only one asterisk) means match all ip addresses
# we can use CIDR ips like 192.168.5.64/26
# and also use range like these: 10.0.1.[0-254] and host[01-08,20-25].domain.com
# for example:
# allow_hosts=10.0.1.[1-15,20]
# allow_hosts=host[01-08,20-25].domain.com
# allow_hosts=192.168.5.64/26
allow_hosts=*
# sync log buff to disk every interval seconds
# default value is 10 seconds
sync_log_buff_interval = 10
# check storage server alive interval seconds
check_active_interval = 120
# thread stack size, should >= 64KB
# default value is 64KB
thread_stack_size = 64KB
# auto adjust when the ip address of the storage server changed
# default value is true
storage_ip_changed_auto_adjust = true
# storage sync file max delay seconds
# default value is 86400 seconds (one day)
# since V2.00
storage_sync_file_max_delay = 86400
# the max time of storage sync a file
# default value is 300 seconds
# since V2.00
storage_sync_file_max_time = 300
# if use a trunk file to store several small files
# default value is false
# since V3.00
use_trunk_file = false
# the min slot size, should <= 4KB
# default value is 256 bytes
# since V3.00
slot_min_size = 256
# the max slot size, should > slot_min_size
# store the upload file to trunk file when it's size <= this value
# default value is 16MB
# since V3.00
slot_max_size = 16MB
# the trunk file size, should >= 4MB
# default value is 64MB
# since V3.00
trunk_file_size = 64MB
# if create trunk file advancely
# default value is false
# since V3.06
trunk_create_file_advance = false
# the time base to create trunk file
# the time format: HH:MM
# default value is 02:00
# since V3.06
trunk_create_file_time_base = 02:00
# the interval of create trunk file, unit: second
# default value is 38400 (one day)
# since V3.06
trunk_create_file_interval = 86400
# the threshold to create trunk file
# when the free trunk file size less than the threshold, will create
# the trunk files
# default value is 0
# since V3.06
trunk_create_file_space_threshold = 20G
# if check trunk space occupying when loading trunk free spaces
# the occupied spaces will be ignored
# default value is false
# since V3.09
# NOTICE: set this parameter to true will slow the loading of trunk spaces
# when startup. you should set this parameter to true when neccessary.
trunk_init_check_occupying = false
# if ignore storage_trunk.dat, reload from trunk binlog
# default value is false
# since V3.10
# set to true once for version upgrade when your version less than V3.10
trunk_init_reload_from_binlog = false
# the min interval for compressing the trunk binlog file
# unit: second
# default value is 0, 0 means never compress
# FastDFS compress the trunk binlog when trunk init and trunk destroy
# recommand to set this parameter to 86400 (one day)
# since V5.01
trunk_compress_binlog_min_interval = 0
# if use storage ID instead of IP address
# default value is false
# since V4.00
use_storage_id = false
# specify storage ids filename, can use relative or absolute path
# since V4.00
storage_ids_filename = storage_ids.conf
# id type of the storage server in the filename, values are:
## ip: the ip address of the storage server
## id: the server id of the storage server
# this paramter is valid only when use_storage_id set to true
# default value is ip
# since V4.03
id_type_in_filename = ip
# if store slave file use symbol link
# default value is false
# since V4.01
store_slave_file_use_link = false
# if rotate the error log every day
# default value is false
# since V4.02
rotate_error_log = false
# rotate error log time base, time format: Hour:Minute
# Hour from 0 to 23, Minute from 0 to 59
# default value is 00:00
# since V4.02
error_log_rotate_time=00:00
# rotate error log when the log file exceeds this size
# 0 means never rotates log file by log file size
# default value is 0
# since V4.02
rotate_error_log_size = 0
# keep days of the log files
# 0 means do not delete old log files
# default value is 0
log_file_keep_days = 0
# if use connection pool
# default value is false
# since V4.05
use_connection_pool = false
# connections whose the idle time exceeds this time will be closed
# unit: second
# default value is 3600
# since V4.05
connection_pool_max_idle_time = 3600
# HTTP port on this tracker server
http.server_port=8080
# check storage HTTP server alive interval seconds
# <= 0 for never check
# default value is 30
http.check_alive_interval=30
# check storage HTTP server alive type, values are:
# tcp : connect to the storge server with HTTP port only,
# do not request and get response
# http: storage check alive url must return http status 200
# default value is tcp
http.check_alive_type=tcp
# check storage HTTP server alive uri/url
# NOTE: storage embed HTTP server support uri: /status.html
http.check_alive_uri=/status.html

View File

@ -0,0 +1,26 @@
#!/bin/bash
new_val=$FASTDFS_IPADDR
old="com.ikingtech.ch116221"
sed -i "s/$old/$new_val/g" /etc/fdfs/client.conf
sed -i "s/$old/$new_val/g" /etc/fdfs/storage.conf
sed -i "s/$old/$new_val/g" /etc/fdfs/mod_fastdfs.conf
cat /etc/fdfs/client.conf > /etc/fdfs/client.txt
cat /etc/fdfs/storage.conf > /etc/fdfs/storage.txt
cat /etc/fdfs/mod_fastdfs.conf > /etc/fdfs/mod_fastdfs.txt
mv /usr/local/nginx/conf/nginx.conf /usr/local/nginx/conf/nginx.conf.t
cp /etc/fdfs/nginx.conf /usr/local/nginx/conf
echo "start trackerd"
/etc/init.d/fdfs_trackerd start
echo "start storage"
/etc/init.d/fdfs_storaged start
echo "start nginx"
/usr/local/nginx/sbin/nginx
tail -f /dev/null

View File

@ -0,0 +1,75 @@
.SUFFIXES: .c .o .lo
COMPILE = $(CC) $(CFLAGS)
INC_PATH =
LIB_PATH = $(LIBS)
TARGET_LIB = $(TARGET_PREFIX)/$(LIB_VERSION)
FAST_SHARED_OBJS = hash.lo chain.lo shared_func.lo ini_file_reader.lo \
logger.lo sockopt.lo base64.lo sched_thread.lo \
http_func.lo md5.lo pthread_func.lo local_ip_func.lo \
avl_tree.lo ioevent.lo ioevent_loop.lo fast_task_queue.lo \
fast_timer.lo process_ctrl.lo fast_mblock.lo \
connection_pool.lo fast_mpool.lo fast_allocator.lo \
fast_buffer.lo multi_skiplist.lo flat_skiplist.lo \
system_info.lo fast_blocked_queue.lo id_generator.lo \
char_converter.lo char_convert_loader.lo common_blocked_queue.lo \
multi_socket_client.lo skiplist_set.lo uniq_skiplist.lo \
json_parser.lo buffered_file_writer.lo server_id_func.lo
FAST_STATIC_OBJS = hash.o chain.o shared_func.o ini_file_reader.o \
logger.o sockopt.o base64.o sched_thread.o \
http_func.o md5.o pthread_func.o local_ip_func.o \
avl_tree.o ioevent.o ioevent_loop.o fast_task_queue.o \
fast_timer.o process_ctrl.o fast_mblock.o \
connection_pool.o fast_mpool.o fast_allocator.o \
fast_buffer.o multi_skiplist.o flat_skiplist.o \
system_info.o fast_blocked_queue.o id_generator.o \
char_converter.o char_convert_loader.o common_blocked_queue.o \
multi_socket_client.o skiplist_set.o uniq_skiplist.o \
json_parser.o buffered_file_writer.o server_id_func.lo
HEADER_FILES = common_define.h hash.h chain.h logger.h base64.h \
shared_func.h pthread_func.h ini_file_reader.h _os_define.h \
sockopt.h sched_thread.h http_func.h md5.h local_ip_func.h \
avl_tree.h ioevent.h ioevent_loop.h fast_task_queue.h \
fast_timer.h process_ctrl.h fast_mblock.h \
connection_pool.h fast_mpool.h fast_allocator.h \
fast_buffer.h skiplist.h multi_skiplist.h flat_skiplist.h \
skiplist_common.h system_info.h fast_blocked_queue.h \
php7_ext_wrapper.h id_generator.h char_converter.h \
char_convert_loader.h common_blocked_queue.h \
multi_socket_client.h skiplist_set.h uniq_skiplist.h \
fc_list.h json_parser.h buffered_file_writer.h server_id_func.h
ALL_OBJS = $(FAST_STATIC_OBJS) $(FAST_SHARED_OBJS)
ALL_PRGS =
SHARED_LIBS = libfastcommon.so
STATIC_LIBS = libfastcommon.a
ALL_LIBS = $(SHARED_LIBS) $(STATIC_LIBS)
all: $(ALL_OBJS) $(ALL_PRGS) $(ALL_LIBS)
libfastcommon.so: $(FAST_SHARED_OBJS)
$(COMPILE) -o $@ -shared $(FAST_SHARED_OBJS) $(LIB_PATH)
libfastcommon.a: $(FAST_STATIC_OBJS)
ar rcs $@ $(FAST_STATIC_OBJS)
.o:
$(COMPILE) -o $@ $< $(FAST_STATIC_OBJS) $(LIB_PATH) $(INC_PATH)
.c:
$(COMPILE) -o $@ $< $(FAST_STATIC_OBJS) $(LIB_PATH) $(INC_PATH)
.c.o:
$(COMPILE) -c -o $@ $< $(INC_PATH)
.c.lo:
$(COMPILE) -c -fPIC -o $@ $< $(INC_PATH)
install:
mkdir -p $(TARGET_LIB)
mkdir -p $(TARGET_PREFIX)/lib
mkdir -p $(TARGET_PREFIX)/include/fastcommon
install -m 755 $(SHARED_LIBS) $(TARGET_LIB)
install -m 644 $(HEADER_FILES) $(TARGET_PREFIX)/include/fastcommon
if [ ! -e $(TARGET_PREFIX)/lib/libfastcommon.so ]; then ln -s $(TARGET_LIB)/libfastcommon.so $(TARGET_PREFIX)/lib/libfastcommon.so; fi
clean:
rm -f $(ALL_OBJS) $(ALL_PRGS) $(ALL_LIBS)

View File

@ -0,0 +1,789 @@
#include "avl_tree.h"
int avl_tree_init(AVLTreeInfo *tree, FreeDataFunc free_data_func, \
CompareFunc compare_func)
{
tree->root = NULL;
tree->free_data_func = free_data_func;
tree->compare_func = compare_func;
return 0;
}
static void avl_tree_destroy_loop(FreeDataFunc free_data_func, \
AVLTreeNode *pCurrentNode)
{
if (pCurrentNode->left != NULL)
{
avl_tree_destroy_loop(free_data_func, pCurrentNode->left);
}
if (pCurrentNode->right != NULL)
{
avl_tree_destroy_loop(free_data_func, pCurrentNode->right);
}
if (free_data_func != NULL)
{
free_data_func(pCurrentNode->data);
}
free(pCurrentNode);
}
void avl_tree_destroy(AVLTreeInfo *tree)
{
if (tree == NULL)
{
return;
}
if (tree->root != NULL)
{
avl_tree_destroy_loop(tree->free_data_func, tree->root);
tree->root = NULL;
}
}
static AVLTreeNode *createTreeNode(AVLTreeNode *pParentNode, void *target_data)
{
AVLTreeNode *pNewNode;
pNewNode = (AVLTreeNode *)malloc(sizeof(AVLTreeNode));
if (pNewNode == NULL)
{
/*
fprintf(stderr, "file: "__FILE__", line: %d, " \
"malloc %d bytes fail!\n", __LINE__, \
(int)sizeof(AVLTreeNode));
*/
return NULL;
}
pNewNode->left = pNewNode->right = NULL;
pNewNode->data = target_data;
pNewNode->balance = 0;
return pNewNode;
}
static void avlRotateLeft(AVLTreeNode *pRotateNode, AVLTreeNode **ppRaiseNode)
{
*ppRaiseNode = pRotateNode->right;
pRotateNode->right = (*ppRaiseNode)->left;
(*ppRaiseNode)->left = pRotateNode;
}
static void avlRotateRight(AVLTreeNode *pRotateNode, AVLTreeNode **ppRaiseNode)
{
*ppRaiseNode = pRotateNode->left;
pRotateNode->left = (*ppRaiseNode)->right;
(*ppRaiseNode)->right = pRotateNode;
}
static void avlLeftBalanceWhenInsert(AVLTreeNode **pTreeNode, int *taller)
{
AVLTreeNode *leftsub;
AVLTreeNode *rightsub;
leftsub = (*pTreeNode)->left;
switch (leftsub->balance)
{
case -1 :
(*pTreeNode)->balance = leftsub->balance = 0;
avlRotateRight (*pTreeNode, pTreeNode);
*taller = 0;
break;
case 0 :
break;
case 1 :
rightsub = leftsub->right;
switch ( rightsub->balance )
{
case -1:
(*pTreeNode)->balance = 1;
leftsub->balance = 0;
break;
case 0 :
(*pTreeNode)->balance = leftsub->balance = 0;
break;
case 1 :
(*pTreeNode)->balance = 0;
leftsub->balance = -1;
break;
}
rightsub->balance = 0;
avlRotateLeft( leftsub, &((*pTreeNode)->left));
avlRotateRight(*pTreeNode, pTreeNode);
*taller = 0;
}
}
static void avlRightBalanceWhenInsert(AVLTreeNode **pTreeNode, int *taller)
{
AVLTreeNode *rightsub;
AVLTreeNode *leftsub;
rightsub = (*pTreeNode)->right;
switch (rightsub->balance)
{
case 1:
(*pTreeNode)->balance = rightsub->balance = 0;
avlRotateLeft(*pTreeNode, pTreeNode);
*taller = 0;
break;
case 0:
break;
case -1:
leftsub = rightsub->left;
switch (leftsub->balance)
{
case 1 :
(*pTreeNode)->balance = -1;
rightsub->balance = 0;
break;
case 0 :
(*pTreeNode)->balance = rightsub->balance = 0;
break;
case -1 :
(*pTreeNode)->balance = 0;
rightsub->balance = 1;
break;
}
leftsub->balance = 0;
avlRotateRight(rightsub, &((*pTreeNode)->right));
avlRotateLeft(*pTreeNode, pTreeNode);
*taller = 0;
}
}
static int avl_tree_insert_loop(CompareFunc compare_func, AVLTreeNode **pCurrentNode, \
void *target_data, int *taller)
{
int nCompRes;
int success;
if (*pCurrentNode == NULL)
{
*pCurrentNode = createTreeNode(*pCurrentNode, target_data);
if (*pCurrentNode != NULL)
{
*taller = 1;
return 1;
}
else
{
return -ENOMEM;
}
}
nCompRes = compare_func((*pCurrentNode)->data, target_data);
if (nCompRes > 0)
{
success = avl_tree_insert_loop(compare_func, \
&((*pCurrentNode)->left), target_data, taller);
if (*taller != 0)
{
switch ((*pCurrentNode)->balance)
{
case -1:
avlLeftBalanceWhenInsert(pCurrentNode, taller);
break;
case 0:
(*pCurrentNode)->balance = -1;
break;
case 1:
(*pCurrentNode)->balance = 0;
*taller = 0;
break;
}
}
}
else if (nCompRes < 0)
{
success = avl_tree_insert_loop(compare_func, \
&((*pCurrentNode)->right), target_data, taller);
if (*taller != 0)
{
switch ((*pCurrentNode)->balance)
{
case -1:
(*pCurrentNode)->balance = 0;
*taller = 0;
break;
case 0:
(*pCurrentNode)->balance = 1;
break;
case 1:
avlRightBalanceWhenInsert(pCurrentNode, taller);
break;
}
}
}
else
{
return 0;
}
return success;
}
int avl_tree_insert(AVLTreeInfo *tree, void *data)
{
int taller;
taller = 0;
return avl_tree_insert_loop(tree->compare_func, &(tree->root), \
data, &taller);
}
static int avl_tree_replace_loop(CompareFunc compare_func, \
FreeDataFunc free_data_func, AVLTreeNode **pCurrentNode, \
void *target_data, int *taller)
{
int nCompRes;
int success;
if (*pCurrentNode == NULL )
{
*pCurrentNode = createTreeNode(*pCurrentNode, target_data);
if (*pCurrentNode != NULL)
{
*taller = 1;
return 1;
}
else
{
return -ENOMEM;
}
}
nCompRes = compare_func((*pCurrentNode)->data, target_data);
if (nCompRes > 0)
{
success = avl_tree_replace_loop(compare_func, free_data_func,
&((*pCurrentNode)->left), target_data, taller);
if (*taller != 0)
{
switch ((*pCurrentNode)->balance)
{
case -1:
avlLeftBalanceWhenInsert(pCurrentNode, taller);
break;
case 0:
(*pCurrentNode)->balance = -1;
break;
case 1:
(*pCurrentNode)->balance = 0;
*taller = 0;
break;
}
}
}
else if (nCompRes < 0)
{
success = avl_tree_replace_loop(compare_func, free_data_func,
&((*pCurrentNode)->right), target_data, taller);
if (*taller != 0)
{
switch ((*pCurrentNode)->balance)
{
case -1 :
(*pCurrentNode)->balance = 0;
*taller = 0;
break;
case 0 :
(*pCurrentNode)->balance = 1;
break;
case 1 :
avlRightBalanceWhenInsert(pCurrentNode, taller);
break;
}
}
}
else
{
if (free_data_func != NULL)
{
free_data_func((*pCurrentNode)->data);
}
(*pCurrentNode)->data = target_data;
return 0;
}
return success;
}
int avl_tree_replace(AVLTreeInfo *tree, void *data)
{
int taller;
taller = 0;
return avl_tree_replace_loop(tree->compare_func, \
tree->free_data_func, &(tree->root), data, &taller);
}
static AVLTreeNode *avl_tree_find_loop(CompareFunc compare_func, \
AVLTreeNode *pCurrentNode, void *target_data)
{
int nCompRes;
nCompRes = compare_func(pCurrentNode->data, target_data);
if (nCompRes > 0)
{
if (pCurrentNode->left == NULL)
{
return NULL;
}
else
{
return avl_tree_find_loop(compare_func, \
pCurrentNode->left, target_data);
}
}
else if (nCompRes < 0)
{
if (pCurrentNode->right == NULL)
{
return NULL;
}
else
{
return avl_tree_find_loop(compare_func, \
pCurrentNode->right, target_data);
}
}
else
{
return pCurrentNode;
}
}
static void *avl_tree_find_ge_loop(CompareFunc compare_func, \
AVLTreeNode *pCurrentNode, void *target_data)
{
int nCompRes;
void *found;
nCompRes = compare_func(pCurrentNode->data, target_data);
if (nCompRes > 0)
{
if (pCurrentNode->left == NULL)
{
return pCurrentNode->data;
}
found = avl_tree_find_ge_loop(compare_func, \
pCurrentNode->left, target_data);
return found != NULL ? found : pCurrentNode->data;
}
else if (nCompRes < 0)
{
if (pCurrentNode->right == NULL)
{
return NULL;
}
else
{
return avl_tree_find_ge_loop(compare_func, \
pCurrentNode->right, target_data);
}
}
else
{
return pCurrentNode->data;
}
}
void *avl_tree_find(AVLTreeInfo *tree, void *target_data)
{
AVLTreeNode *found;
if (tree->root == NULL)
{
return NULL;
}
found = avl_tree_find_loop(tree->compare_func, \
tree->root, target_data);
return found != NULL ? found->data : NULL;
}
void *avl_tree_find_ge(AVLTreeInfo *tree, void *target_data)
{
void *found;
if (tree->root == NULL)
{
found = NULL;
}
else
{
found = avl_tree_find_ge_loop(tree->compare_func, \
tree->root, target_data);
}
return found;
}
static void avlLeftBalanceWhenDelete(AVLTreeNode **pTreeNode, int *shorter)
{
AVLTreeNode *leftsub;
AVLTreeNode *rightsub;
leftsub = (*pTreeNode)->left;
switch (leftsub->balance)
{
case -1:
(*pTreeNode)->balance = leftsub->balance = 0;
avlRotateRight (*pTreeNode, pTreeNode);
break;
case 0:
leftsub->balance = 1;
avlRotateRight(*pTreeNode, pTreeNode);
*shorter = 0;
break;
case 1:
rightsub = leftsub->right;
switch ( rightsub->balance )
{
case -1:
(*pTreeNode)->balance = 1;
leftsub->balance = 0;
break;
case 0 :
(*pTreeNode)->balance = leftsub->balance = 0;
break;
case 1 :
(*pTreeNode)->balance = 0;
leftsub->balance = -1;
break;
}
rightsub->balance = 0;
avlRotateLeft( leftsub, &((*pTreeNode)->left));
avlRotateRight(*pTreeNode, pTreeNode);
break;
}
}
static void avlRightBalanceWhenDelete(AVLTreeNode **pTreeNode, int *shorter)
{
AVLTreeNode *rightsub;
AVLTreeNode *leftsub;
rightsub = (*pTreeNode)->right;
switch (rightsub->balance)
{
case 1:
(*pTreeNode)->balance = rightsub->balance = 0;
avlRotateLeft(*pTreeNode, pTreeNode);
break;
case 0:
rightsub->balance = -1;
avlRotateLeft(*pTreeNode, pTreeNode);
*shorter = 0;
break;
case -1:
leftsub = rightsub->left;
switch (leftsub->balance)
{
case 1:
(*pTreeNode)->balance = -1;
rightsub->balance = 0;
break;
case 0:
(*pTreeNode)->balance = rightsub->balance = 0;
break;
case -1:
(*pTreeNode)->balance = 0;
rightsub->balance = 1;
break;
}
leftsub->balance = 0;
avlRotateRight(rightsub, &((*pTreeNode)->right));
avlRotateLeft(*pTreeNode, pTreeNode);
break;
}
}
static int avl_tree_delete_loop(AVLTreeInfo *tree, AVLTreeNode **pCurrentNode,\
void *target_data, int *shorter, AVLTreeNode *pDeletedDataNode)
{
int nCompRes;
bool result;
AVLTreeNode *leftsub;
AVLTreeNode *rightsub;
if (pDeletedDataNode != NULL)
{
if ((*pCurrentNode)->right == NULL)
{
pDeletedDataNode->data = (*pCurrentNode)->data;
leftsub = (*pCurrentNode)->left;
free(*pCurrentNode);
*pCurrentNode = leftsub;
*shorter = 1;
return 1;
}
nCompRes = -1;
}
else
{
nCompRes = tree->compare_func((*pCurrentNode)->data, target_data);
}
if (nCompRes > 0)
{
if ((*pCurrentNode)->left == NULL)
{
return 0;
}
result = avl_tree_delete_loop(tree, &((*pCurrentNode)->left), \
target_data, shorter, pDeletedDataNode);
if (*shorter != 0)
{
switch ((*pCurrentNode)->balance)
{
case -1:
(*pCurrentNode)->balance = 0;
break;
case 0:
(*pCurrentNode)->balance = 1;
*shorter = 0;
break;
case 1:
avlRightBalanceWhenDelete(pCurrentNode, shorter);
break;
}
}
return result;
}
else if (nCompRes < 0)
{
if ((*pCurrentNode)->right == NULL)
{
return 0;
}
result = avl_tree_delete_loop(tree, &((*pCurrentNode)->right), \
target_data, shorter, pDeletedDataNode);
if (*shorter != 0)
{
switch ((*pCurrentNode)->balance)
{
case -1:
avlLeftBalanceWhenDelete(pCurrentNode, shorter);
break;
case 0:
(*pCurrentNode)->balance = -1;
*shorter = 0;
break;
case 1:
(*pCurrentNode)->balance = 0;
break;
}
}
return result;
}
else
{
if (tree->free_data_func != NULL)
{
tree->free_data_func((*pCurrentNode)->data);
}
leftsub = (*pCurrentNode)->left;
rightsub = (*pCurrentNode)->right;
if (leftsub == NULL)
{
free(*pCurrentNode);
*pCurrentNode = rightsub;
}
else if (rightsub == NULL)
{
free(*pCurrentNode);
*pCurrentNode = leftsub;
}
else
{
avl_tree_delete_loop(tree, &((*pCurrentNode)->left), \
target_data, shorter, *pCurrentNode);
if (*shorter != 0)
{
switch ((*pCurrentNode)->balance)
{
case -1:
(*pCurrentNode)->balance = 0;
break;
case 0:
(*pCurrentNode)->balance = 1;
*shorter = 0;
break;
case 1:
avlRightBalanceWhenDelete(pCurrentNode, shorter);
break;
}
}
return 1;
}
*shorter = 1;
return 1;
}
}
int avl_tree_delete(AVLTreeInfo *tree, void *data)
{
int shorter;
if (tree->root == NULL)
{
return 0;
}
shorter = 0;
return avl_tree_delete_loop(tree, &(tree->root), data, &shorter, NULL);
}
static int avl_tree_walk_loop(DataOpFunc data_op_func, \
AVLTreeNode *pCurrentNode, void *args)
{
int result;
if (pCurrentNode->left != NULL)
{
result = avl_tree_walk_loop(data_op_func, \
pCurrentNode->left, args);
if (result != 0)
{
return result;
}
}
if ((result=data_op_func(pCurrentNode->data, args)) != 0)
{
return result;
}
/*
if (pCurrentNode->balance >= -1 && pCurrentNode->balance <= 1)
{
//printf("==%d\n", pCurrentNode->balance);
}
else
{
printf("==bad %d!!!!!!!!!!!!\n", pCurrentNode->balance);
}
*/
if (pCurrentNode->right != NULL)
{
result = avl_tree_walk_loop(data_op_func, \
pCurrentNode->right, args);
}
return result;
}
int avl_tree_walk(AVLTreeInfo *tree, DataOpFunc data_op_func, void *args)
{
if (tree->root == NULL)
{
return 0;
}
else
{
return avl_tree_walk_loop(data_op_func, tree->root, args);
}
}
static void avl_tree_count_loop(AVLTreeNode *pCurrentNode, int *count)
{
if (pCurrentNode->left != NULL)
{
avl_tree_count_loop(pCurrentNode->left, count);
}
(*count)++;
if (pCurrentNode->right != NULL)
{
avl_tree_count_loop(pCurrentNode->right, count);
}
}
int avl_tree_count(AVLTreeInfo *tree)
{
int count;
if (tree->root == NULL)
{
return 0;
}
count = 0;
avl_tree_count_loop(tree->root, &count);
return count;
}
int avl_tree_depth(AVLTreeInfo *tree)
{
int depth;
AVLTreeNode *pNode;
if (tree->root == NULL)
{
return 0;
}
depth = 0;
pNode = tree->root;
while (pNode != NULL)
{
if (pNode->balance == -1)
{
pNode = pNode->left;
}
else
{
pNode = pNode->right;
}
depth++;
}
return depth;
}
/*
static void avl_tree_print_loop(AVLTreeNode *pCurrentNode)
{
printf("%ld left: %ld, right: %ld, balance: %d\n", pCurrentNode->data,
pCurrentNode->left != NULL ? pCurrentNode->left->data : 0,
pCurrentNode->right != NULL ? pCurrentNode->right->data : 0,
pCurrentNode->balance);
if (pCurrentNode->left != NULL)
{
avl_tree_print_loop(pCurrentNode->left);
}
if (pCurrentNode->right != NULL)
{
avl_tree_print_loop(pCurrentNode->right);
}
}
void avl_tree_print(AVLTreeInfo *tree)
{
if (tree->root == NULL)
{
return;
}
avl_tree_print_loop(tree->root);
}
*/

View File

@ -0,0 +1,47 @@
#ifndef AVL_TREE_H
#define AVL_TREE_H
#include <stdio.h>
#include <stdlib.h>
#include <pthread.h>
#include "common_define.h"
typedef struct tagAVLTreeNode {
void *data;
struct tagAVLTreeNode *left;
struct tagAVLTreeNode *right;
byte balance;
} AVLTreeNode;
typedef int (*DataOpFunc) (void *data, void *args);
typedef struct tagAVLTreeInfo {
AVLTreeNode *root;
FreeDataFunc free_data_func;
CompareFunc compare_func;
} AVLTreeInfo;
#ifdef __cplusplus
extern "C" {
#endif
int avl_tree_init(AVLTreeInfo *tree, FreeDataFunc free_data_func, \
CompareFunc compare_func);
void avl_tree_destroy(AVLTreeInfo *tree);
int avl_tree_insert(AVLTreeInfo *tree, void *data);
int avl_tree_replace(AVLTreeInfo *tree, void *data);
int avl_tree_delete(AVLTreeInfo *tree, void *data);
void *avl_tree_find(AVLTreeInfo *tree, void *target_data);
void *avl_tree_find_ge(AVLTreeInfo *tree, void *target_data);
int avl_tree_walk(AVLTreeInfo *tree, DataOpFunc data_op_func, void *args);
int avl_tree_count(AVLTreeInfo *tree);
int avl_tree_depth(AVLTreeInfo *tree);
//void avl_tree_print(AVLTreeInfo *tree);
#ifdef __cplusplus
}
#endif
#endif

397
JCEC-fastdfs/fastcommon/base64.c Executable file
View File

@ -0,0 +1,397 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <time.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <sys/types.h>
#include <sys/stat.h>
#include <fcntl.h>
#include <errno.h>
#include "base64.h"
/**
* Marker value for chars we just ignore, e.g. \n \r high ascii
*/
#define BASE64_IGNORE -1
/**
* Marker for = trailing pad
*/
#define BASE64_PAD -2
/**
* determines how long the lines are that are generated by encode.
* Ignored by decode.
* @param length 0 means no newlines inserted. Must be a multiple of 4.
*/
void base64_set_line_length(struct base64_context *context, const int length)
{
context->line_length = (length / 4) * 4;
}
/**
* How lines are separated.
* Ignored by decode.
* @param context->line_separator may be "" but not null.
* Usually contains only a combination of chars \n and \r.
* Could be any chars not in set A-Z a-z 0-9 + /.
*/
void base64_set_line_separator(struct base64_context *context, \
const char *pLineSeparator)
{
context->line_sep_len = snprintf(context->line_separator, \
sizeof(context->line_separator), "%s", pLineSeparator);
if (context->line_sep_len >= sizeof(context->line_separator))
{
context->line_sep_len = sizeof(context->line_separator) - 1;
}
}
void base64_init_ex(struct base64_context *context, const int nLineLength, \
const unsigned char chPlus, const unsigned char chSplash, \
const unsigned char chPad)
{
int i;
memset(context, 0, sizeof(struct base64_context));
context->line_length = nLineLength;
context->line_separator[0] = '\n';
context->line_separator[1] = '\0';
context->line_sep_len = 1;
// build translate valueToChar table only once.
// 0..25 -> 'A'..'Z'
for (i=0; i<=25; i++)
{
context->valueToChar[i] = (char)('A'+i);
}
// 26..51 -> 'a'..'z'
for (i=0; i<=25; i++ )
{
context->valueToChar[i+26] = (char)('a'+i);
}
// 52..61 -> '0'..'9'
for (i=0; i<=9; i++ )
{
context->valueToChar[i+52] = (char)('0'+i);
}
context->valueToChar[62] = chPlus;
context->valueToChar[63] = chSplash;
memset(context->charToValue, BASE64_IGNORE, sizeof(context->charToValue));
for (i=0; i<64; i++ )
{
context->charToValue[context->valueToChar[i]] = i;
}
context->pad_ch = chPad;
context->charToValue[chPad] = BASE64_PAD;
}
int base64_get_encode_length(struct base64_context *context, const int nSrcLen)
{
// Each group or partial group of 3 bytes becomes four chars
// covered quotient
int outputLength;
outputLength = ((nSrcLen + 2) / 3) * 4;
// account for trailing newlines, on all but the very last line
if (context->line_length != 0)
{
int lines = (outputLength + context->line_length - 1) /
context->line_length - 1;
if ( lines > 0 )
{
outputLength += lines * context->line_sep_len;
}
}
return outputLength;
}
/**
* Encode an arbitrary array of bytes as base64 printable ASCII.
* It will be broken into lines of 72 chars each. The last line is not
* terminated with a line separator.
* The output will always have an even multiple of data characters,
* exclusive of \n. It is padded out with =.
*/
char *base64_encode_ex(struct base64_context *context, const char *src, \
const int nSrcLen, char *dest, int *dest_len, const bool bPad)
{
int linePos;
int leftover;
int combined;
char *pDest;
int c0, c1, c2, c3;
unsigned char *pRaw;
unsigned char *pEnd;
const char *ppSrcs[2];
int lens[2];
char szPad[3];
int k;
int loop;
if (nSrcLen <= 0)
{
*dest = '\0';
*dest_len = 0;
return dest;
}
linePos = 0;
lens[0] = (nSrcLen / 3) * 3;
lens[1] = 3;
leftover = nSrcLen - lens[0];
ppSrcs[0] = src;
ppSrcs[1] = szPad;
szPad[0] = szPad[1] = szPad[2] = '\0';
switch (leftover)
{
case 0:
default:
loop = 1;
break;
case 1:
loop = 2;
szPad[0] = src[nSrcLen-1];
break;
case 2:
loop = 2;
szPad[0] = src[nSrcLen-2];
szPad[1] = src[nSrcLen-1];
break;
}
pDest = dest;
for (k=0; k<loop; k++)
{
pEnd = (unsigned char *)ppSrcs[k] + lens[k];
for (pRaw=(unsigned char *)ppSrcs[k]; pRaw<pEnd; pRaw+=3)
{
// Start a new line if next 4 chars won't fit on the current line
// We can't encapsulete the following code since the variable need to
// be local to this incarnation of encode.
linePos += 4;
if (linePos > context->line_length)
{
if (context->line_length != 0)
{
memcpy(pDest, context->line_separator, context->line_sep_len);
pDest += context->line_sep_len;
}
linePos = 4;
}
// get next three bytes in unsigned form lined up,
// in big-endian order
combined = ((*pRaw) << 16) | ((*(pRaw+1)) << 8) | (*(pRaw+2));
// break those 24 bits into a 4 groups of 6 bits,
// working LSB to MSB.
c3 = combined & 0x3f;
combined >>= 6;
c2 = combined & 0x3f;
combined >>= 6;
c1 = combined & 0x3f;
combined >>= 6;
c0 = combined & 0x3f;
// Translate into the equivalent alpha character
// emitting them in big-endian order.
*pDest++ = context->valueToChar[c0];
*pDest++ = context->valueToChar[c1];
*pDest++ = context->valueToChar[c2];
*pDest++ = context->valueToChar[c3];
}
}
*pDest = '\0';
*dest_len = pDest - dest;
// deal with leftover bytes
switch (leftover)
{
case 0:
default:
// nothing to do
break;
case 1:
// One leftover byte generates xx==
if (bPad)
{
*(pDest-1) = context->pad_ch;
*(pDest-2) = context->pad_ch;
}
else
{
*(pDest-2) = '\0';
*dest_len -= 2;
}
break;
case 2:
// Two leftover bytes generates xxx=
if (bPad)
{
*(pDest-1) = context->pad_ch;
}
else
{
*(pDest-1) = '\0';
*dest_len -= 1;
}
break;
} // end switch;
return dest;
}
char *base64_decode_auto(struct base64_context *context, const char *src, \
const int nSrcLen, char *dest, int *dest_len)
{
int nRemain;
int nPadLen;
int nNewLen;
char tmpBuff[256];
char *pBuff;
nRemain = nSrcLen % 4;
if (nRemain == 0)
{
return base64_decode(context, src, nSrcLen, dest, dest_len);
}
nPadLen = 4 - nRemain;
nNewLen = nSrcLen + nPadLen;
if (nNewLen <= sizeof(tmpBuff))
{
pBuff = tmpBuff;
}
else
{
pBuff = (char *)malloc(nNewLen);
if (pBuff == NULL)
{
fprintf(stderr, "Can't malloc %d bytes\n", \
nSrcLen + nPadLen + 1);
*dest_len = 0;
*dest = '\0';
return dest;
}
}
memcpy(pBuff, src, nSrcLen);
memset(pBuff + nSrcLen, context->pad_ch, nPadLen);
base64_decode(context, pBuff, nNewLen, dest, dest_len);
if (pBuff != tmpBuff)
{
free(pBuff);
}
return dest;
}
/**
* decode a well-formed complete base64 string back into an array of bytes.
* It must have an even multiple of 4 data characters (not counting \n),
* padded out with = as needed.
*/
char *base64_decode(struct base64_context *context, const char *src, \
const int nSrcLen, char *dest, int *dest_len)
{
// tracks where we are in a cycle of 4 input chars.
int cycle;
// where we combine 4 groups of 6 bits and take apart as 3 groups of 8.
int combined;
// will be an even multiple of 4 chars, plus some embedded \n
int dummies;
int value;
unsigned char *pSrc;
unsigned char *pSrcEnd;
char *pDest;
cycle = 0;
combined = 0;
dummies = 0;
pDest = dest;
pSrcEnd = (unsigned char *)src + nSrcLen;
for (pSrc=(unsigned char *)src; pSrc<pSrcEnd; pSrc++)
{
value = context->charToValue[*pSrc];
switch (value)
{
case BASE64_IGNORE:
// e.g. \n, just ignore it.
break;
case BASE64_PAD:
value = 0;
dummies++;
// fallthrough
default:
/* regular value character */
switch (cycle)
{
case 0:
combined = value;
cycle = 1;
break;
case 1:
combined <<= 6;
combined |= value;
cycle = 2;
break;
case 2:
combined <<= 6;
combined |= value;
cycle = 3;
break;
case 3:
combined <<= 6;
combined |= value;
// we have just completed a cycle of 4 chars.
// the four 6-bit values are in combined in big-endian order
// peel them off 8 bits at a time working lsb to msb
// to get our original 3 8-bit bytes back
*pDest++ = (char)(combined >> 16);
*pDest++ = (char)((combined & 0x0000FF00) >> 8);
*pDest++ = (char)(combined & 0x000000FF);
cycle = 0;
break;
}
break;
}
} // end for
if (cycle != 0)
{
*dest = '\0';
*dest_len = 0;
fprintf(stderr, "Input to decode not an even multiple of " \
"4 characters; pad with %c\n", context->pad_ch);
return dest;
}
*dest_len = (pDest - dest) - dummies;
*(dest + (*dest_len)) = '\0';
return dest;
}

134
JCEC-fastdfs/fastcommon/base64.h Executable file
View File

@ -0,0 +1,134 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//base64.h
#ifndef _BASE64_H
#define _BASE64_H
#include "common_define.h"
struct base64_context
{
char line_separator[16];
int line_sep_len;
/**
* max chars per line, excluding line_separator. A multiple of 4.
*/
int line_length;
/**
* letter of the alphabet used to encode binary values 0..63
*/
unsigned char valueToChar[64];
/**
* binary value encoded by a given letter of the alphabet 0..63
*/
int charToValue[256];
int pad_ch;
};
#ifdef __cplusplus
extern "C" {
#endif
/** use stardand base64 charset
*/
#define base64_init(context, nLineLength) \
base64_init_ex(context, nLineLength, '+', '/', '=')
/** stardand base64 encode
*/
#define base64_encode(context, src, nSrcLen, dest, dest_len) \
base64_encode_ex(context, src, nSrcLen, dest, dest_len, true)
/** base64 init function, before use base64 function, you should call
* this function
* parameters:
* context: the base64 context
* nLineLength: length of a line, 0 for never add line seperator
* chPlus: specify a char for base64 char plus (+)
* chSplash: specify a char for base64 char splash (/)
* chPad: specify a char for base64 padding char =
* return: none
*/
void base64_init_ex(struct base64_context *context, const int nLineLength, \
const unsigned char chPlus, const unsigned char chSplash, \
const unsigned char chPad);
/** calculate the encoded length of the source buffer
* parameters:
* context: the base64 context
* nSrcLen: the source buffer length
* return: the encoded length of the source buffer
*/
int base64_get_encode_length(struct base64_context *context, const int nSrcLen);
/** base64 encode buffer
* parameters:
* context: the base64 context
* src: the source buffer
* nSrcLen: the source buffer length
* dest: the dest buffer
* dest_len: return dest buffer length
* bPad: if padding
* return: the encoded buffer
*/
char *base64_encode_ex(struct base64_context *context, const char *src, \
const int nSrcLen, char *dest, int *dest_len, const bool bPad);
/** base64 decode buffer, work only with padding source string
* parameters:
* context: the base64 context
* src: the source buffer with padding
* nSrcLen: the source buffer length
* dest: the dest buffer
* dest_len: return dest buffer length
* return: the decoded buffer
*/
char *base64_decode(struct base64_context *context, const char *src, \
const int nSrcLen, char *dest, int *dest_len);
/** base64 decode buffer, can work with no padding source string
* parameters:
* context: the base64 context
* src: the source buffer with padding or no padding
* nSrcLen: the source buffer length
* dest: the dest buffer
* dest_len: return dest buffer length
* return: the decoded buffer
*/
char *base64_decode_auto(struct base64_context *context, const char *src, \
const int nSrcLen, char *dest, int *dest_len);
/** set line separator string, such as \n or \r\n
* parameters:
* context: the base64 context
* pLineSeparator: the line separator string
* return: none
*/
void base64_set_line_separator(struct base64_context *context, \
const char *pLineSeparator);
/** set line length, 0 for never add line separators
* parameters:
* context: the base64 context
* length: the line length
* return: none
*/
void base64_set_line_length(struct base64_context *context, const int length);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,208 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include <unistd.h>
#include <fcntl.h>
#include <stdarg.h>
#include "shared_func.h"
#include "logger.h"
#include "buffered_file_writer.h"
int buffered_file_writer_open_ex(BufferedFileWriter *writer,
const char *filename, const int buffer_size,
const int max_written_once, const int mode)
{
int result;
int written_once;
writer->buffer_size = (buffer_size > 0) ? buffer_size : 64 * 1024;
written_once = (max_written_once > 0) ? max_written_once : 256;
if (written_once > writer->buffer_size)
{
logError("file: "__FILE__", line: %d, "
"max_written_once: %d > buffer_size: %d",
__LINE__, written_once, writer->buffer_size);
return EINVAL;
}
writer->buff = (char *)malloc(writer->buffer_size);
if (writer->buff == NULL)
{
logError("file: "__FILE__", line: %d, "
"malloc %d bytes fail",
__LINE__, writer->buffer_size);
return ENOMEM;
}
snprintf(writer->filename, sizeof(writer->filename), "%s", filename);
writer->fd = open(writer->filename, O_WRONLY | O_CREAT | O_TRUNC, mode);
if (writer->fd < 0)
{
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"open file %s fail, "
"errno: %d, error info: %s",
__LINE__, writer->filename,
result, STRERROR(result));
free(writer->buff);
writer->buff = NULL;
return result;
}
writer->current = writer->buff;
writer->buff_end = writer->buff + writer->buffer_size;
writer->water_mark = writer->buff_end - written_once;
return 0;
}
int buffered_file_writer_close(BufferedFileWriter *writer)
{
int result;
if (writer->buff == NULL)
{
return EINVAL;
}
result = buffered_file_writer_flush(writer);
if (result == 0 && fsync(writer->fd) != 0)
{
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"fsync file %s fail, "
"errno: %d, error info: %s",
__LINE__, writer->filename,
result, STRERROR(result));
}
if (close(writer->fd) != 0)
{
if (result == 0)
{
result = errno != 0 ? errno : EIO;
}
logError("file: "__FILE__", line: %d, "
"close file %s fail, "
"errno: %d, error info: %s",
__LINE__, writer->filename,
errno, STRERROR(errno));
}
free(writer->buff);
writer->buff = NULL;
return result;
}
int buffered_file_writer_flush(BufferedFileWriter *writer)
{
int result;
int len;
len = writer->current - writer->buff;
if (len == 0)
{
return 0;
}
if (fc_safe_write(writer->fd, writer->buff, len) != len)
{
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"write to file %s fail, "
"errno: %d, error info: %s", __LINE__,
writer->filename, result, STRERROR(result));
return result;
}
writer->current = writer->buff;
return 0;
}
int buffered_file_writer_append(BufferedFileWriter *writer,
const char *format, ...)
{
va_list ap;
int result;
int remain_size;
int len;
int i;
result = 0;
for (i=0; i<2; i++)
{
remain_size = writer->buff_end - writer->current;
va_start(ap, format);
len = vsnprintf(writer->current, remain_size, format, ap);
va_end(ap);
if (len < remain_size)
{
writer->current += len;
if (writer->current > writer->water_mark)
{
result = buffered_file_writer_flush(writer);
}
break;
}
if (len > writer->buffer_size)
{
result = ENOSPC;
logError("file: "__FILE__", line: %d, "
"too large output buffer, %d > %d!",
__LINE__, len, writer->buffer_size);
break;
}
//maybe full, try again
if ((result=buffered_file_writer_flush(writer)) != 0)
{
break;
}
}
return result;
}
int buffered_file_writer_append_buff(BufferedFileWriter *writer,
const char *buff, const int len)
{
int result;
if (len >= writer->water_mark - writer->current)
{
if ((result=buffered_file_writer_flush(writer)) != 0)
{
return result;
}
if (fc_safe_write(writer->fd, buff, len) != len)
{
result = errno != 0 ? errno : EIO;
logError("file: "__FILE__", line: %d, "
"write to file %s fail, "
"errno: %d, error info: %s", __LINE__,
writer->filename, result, STRERROR(result));
return result;
}
return 0;
}
memcpy(writer->current, buff, len);
writer->current += len;
return 0;
}

View File

@ -0,0 +1,72 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#ifndef BUFFERED_FILE_WRITER_H
#define BUFFERED_FILE_WRITER_H
#include "common_define.h"
typedef struct
{
int fd;
int buffer_size;
char filename[MAX_PATH_SIZE];
char *buff;
char *current;
char *buff_end;
char *water_mark;
} BufferedFileWriter;
#ifdef __cplusplus
extern "C" {
#endif
/** open buffered file writer
* parameters:
* writer: the writer
* filename: the filename to write
* buffer_size: the buffer size, <= 0 for recommend 64KB
* max_written_once: max written bytes per call, <= 0 for 256
* mode: the file privilege such as 0644
* return: error code, 0 for success, != 0 for errno
*/
int buffered_file_writer_open_ex(BufferedFileWriter *writer,
const char *filename, const int buffer_size,
const int max_written_once, const int mode);
static inline int buffered_file_writer_open(BufferedFileWriter *writer,
const char *filename)
{
const int buffer_size = 0;
const int max_written_once = 0;
const int mode = 0644;
return buffered_file_writer_open_ex(writer, filename,
buffer_size, max_written_once, mode);
}
/** close buffered file writer
* parameters:
* writer: the writer
* return: error code, 0 for success, != 0 for errno
*/
int buffered_file_writer_close(BufferedFileWriter *writer);
int buffered_file_writer_append(BufferedFileWriter *writer,
const char *format, ...);
int buffered_file_writer_append_buff(BufferedFileWriter *writer,
const char *buff, const int len);
int buffered_file_writer_flush(BufferedFileWriter *writer);
#ifdef __cplusplus
}
#endif
#endif

325
JCEC-fastdfs/fastcommon/chain.c Executable file
View File

@ -0,0 +1,325 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <errno.h>
#include "chain.h"
//#include "use_mmalloc.h"
void chain_init(ChainList *pList, const int type, FreeDataFunc freeDataFunc, \
CompareFunc compareFunc)
{
if (pList == NULL)
{
return;
}
pList->head = NULL;
pList->tail = NULL;
pList->type = type;
pList->freeDataFunc = freeDataFunc;
pList->compareFunc = compareFunc;
return;
}
void chain_destroy(ChainList *pList)
{
ChainNode *pNode;
ChainNode *pDeleted;
if (pList == NULL || pList->head == NULL)
{
return;
}
pNode = pList->head;
while (pNode != NULL)
{
pDeleted = pNode;
pNode = pNode->next;
freeChainNode(pList, pDeleted);
}
pList->head = pList->tail = NULL;
}
void freeChainNode(ChainList *pList, ChainNode *pChainNode)
{
if (pList->freeDataFunc != NULL)
{
pList->freeDataFunc(pChainNode->data);
}
free(pChainNode);
}
int insertNodePrior(ChainList *pList, void *data)
{
ChainNode *pNode;
if (pList == NULL)
{
return EINVAL;
}
pNode = (ChainNode *)malloc(sizeof(ChainNode));
if (pNode == NULL)
{
return ENOMEM;
}
pNode->data = data;
pNode->next = pList->head;
pList->head = pNode;
if (pList->tail == NULL)
{
pList->tail = pNode;
}
return 0;
}
int appendNode(ChainList *pList, void *data)
{
ChainNode *pNode;
if (pList == NULL)
{
return EINVAL;
}
pNode = (ChainNode *)malloc(sizeof(ChainNode));
if (pNode == NULL)
{
return ENOMEM;
}
pNode->data = data;
pNode->next = NULL;
if (pList->tail != NULL)
{
pList->tail->next = pNode;
}
pList->tail = pNode;
if (pList->head == NULL)
{
pList->head = pNode;
}
return 0;
}
int insertNodeAsc(ChainList *pList, void *data)
{
ChainNode *pNew;
ChainNode *pNode;
ChainNode *pPrevious;
if (pList == NULL || pList->compareFunc == NULL)
{
return EINVAL;
}
pNew = (ChainNode *)malloc(sizeof(ChainNode));
if (pNew == NULL)
{
return ENOMEM;
}
pNew->data = data;
pPrevious = NULL;
pNode = pList->head;
while (pNode != NULL && pList->compareFunc(pNode->data, data) < 0)
{
pPrevious = pNode;
pNode = pNode->next;
}
pNew->next = pNode;
if (pPrevious == NULL)
{
pList->head = pNew;
}
else
{
pPrevious->next = pNew;
}
if (pNode == NULL)
{
pList->tail = pNew;
}
return 0;
}
int addNode(ChainList *pList, void *data)
{
if (pList->type == CHAIN_TYPE_INSERT)
{
return insertNodePrior(pList, data);
}
else if (pList->type == CHAIN_TYPE_APPEND)
{
return appendNode(pList, data);
}
else
{
return insertNodeAsc(pList, data);
}
}
void deleteNodeEx(ChainList *pList, ChainNode *pPreviousNode, \
ChainNode *pDeletedNode)
{
if (pDeletedNode == pList->head)
{
pList->head = pDeletedNode->next;
}
else
{
pPreviousNode->next = pDeletedNode->next;
}
if (pDeletedNode == pList->tail)
{
pList->tail = pPreviousNode;
}
freeChainNode(pList, pDeletedNode);
}
void deleteToNodePrevious(ChainList *pList, ChainNode *pPreviousNode, \
ChainNode *pDeletedNext)
{
ChainNode *pNode;
ChainNode *pDeletedNode;
if (pPreviousNode == NULL)
{
pNode = pList->head;
pList->head = pDeletedNext;
}
else
{
pNode = pPreviousNode->next;
pPreviousNode->next = pDeletedNext;
}
while (pNode != NULL && pNode != pDeletedNext)
{
pDeletedNode = pNode;
pNode = pNode->next;
freeChainNode(pList, pDeletedNode);
}
if (pDeletedNext == NULL)
{
pList->tail = pPreviousNode;
}
}
void *chain_pop_head(ChainList *pList)
{
ChainNode *pDeletedNode;
void *data;
pDeletedNode = pList->head;
if (pDeletedNode == NULL)
{
return NULL;
}
pList->head = pDeletedNode->next;
if (pList->head == NULL)
{
pList->tail = NULL;
}
data = pDeletedNode->data;
free(pDeletedNode);
return data;
}
int chain_count(ChainList *pList)
{
ChainNode *pNode;
int count;
count = 0;
pNode = pList->head;
while (pNode != NULL)
{
count++;
pNode = pNode->next;
}
return count;
}
int deleteNode(ChainList *pList, void *data, bool bDeleteAll)
{
ChainNode *pNode;
ChainNode *pPrevious;
ChainNode *pDeleted;
int nCount;
int nCompareRes;
if (pList == NULL || pList->compareFunc == NULL)
{
return -EINVAL;
}
nCount = 0;
pPrevious = NULL;
pNode = pList->head;
while (pNode != NULL)
{
nCompareRes = pList->compareFunc(pNode->data, data);
if (nCompareRes == 0)
{
pDeleted = pNode;
pNode = pNode->next;
deleteNodeEx(pList, pPrevious, pDeleted);
nCount++;
if (!bDeleteAll)
{
break;
}
continue;
}
else if(nCompareRes > 0 && pList->type == CHAIN_TYPE_SORTED)
{
break;
}
pPrevious = pNode;
pNode = pNode->next;
}
return nCount;
}
int deleteOne(ChainList *pList, void *data)
{
return deleteNode(pList, data, false);
}
int deleteAll(ChainList *pList, void *data)
{
return deleteNode(pList, data, true);
}

148
JCEC-fastdfs/fastcommon/chain.h Executable file
View File

@ -0,0 +1,148 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#ifndef CHAIN_H
#define CHAIN_H
#include "common_define.h"
#define CHAIN_TYPE_INSERT 0 //insert new node before head
#define CHAIN_TYPE_APPEND 1 //insert new node after tail
#define CHAIN_TYPE_SORTED 2 //sorted chain
typedef struct tagChainNode
{
void *data;
struct tagChainNode *next;
} ChainNode;
typedef struct
{
int type;
ChainNode *head;
ChainNode *tail;
FreeDataFunc freeDataFunc;
CompareFunc compareFunc;
} ChainList;
#ifdef __cplusplus
extern "C" {
#endif
/** chain init function
* parameters:
* pList: the chain list
* type: chain type, value is one of following:
* CHAIN_TYPE_INSERT: insert new node before head
* CHAIN_TYPE_APPEND: insert new node after tail
* CHAIN_TYPE_SORTED: sorted chain
* freeDataFunc: free data function pointer, can be NULL
* compareFunc: compare data function pointer, can be NULL,
* must set when type is CHAIN_TYPE_SORTED
* return: none
*/
void chain_init(ChainList *pList, const int type, FreeDataFunc freeDataFunc, \
CompareFunc compareFunc);
/** destroy chain
* parameters:
* pList: the chain list
* return: none
*/
void chain_destroy(ChainList *pList);
/** get the chain node count
* parameters:
* pList: the chain list
* return: chain node count
*/
int chain_count(ChainList *pList);
/** add a new node to the chain
* parameters:
* pList: the chain list
* data: the data to add
* return: error no, 0 for success, != 0 fail
*/
int addNode(ChainList *pList, void *data);
/** free the chain node
* parameters:
* pList: the chain list
* pChainNode: the chain node to free
* return: none
*/
void freeChainNode(ChainList *pList, ChainNode *pChainNode);
/** delete the chain node
* parameters:
* pList: the chain list
* pPreviousNode: the previous chain node
* pDeletedNode: the chain node to delete
* return: none
*/
void deleteNodeEx(ChainList *pList, ChainNode *pPreviousNode, \
ChainNode *pDeletedNode);
/** delete the chain nodes from pPreviousNode->next to pDeletedNext
* (not including pDeletedNext)
* parameters:
* pList: the chain list
* pPreviousNode: the previous chain node, delete from pPreviousNode->next
* pDeletedNext: the chain node after the deleted node
* return: none
*/
void deleteToNodePrevious(ChainList *pList, ChainNode *pPreviousNode, \
ChainNode *pDeletedNext);
/** delete the chain node using data compare function
* parameters:
* pList: the chain list
* data: the first node whose data equals this will be deleted
* return: delete chain node count, < 0 fail
*/
int deleteOne(ChainList *pList, void *data);
/** delete all chain nodes using data compare function
* parameters:
* pList: the chain list
* data: the node whose data equals this will be deleted
* return: delete chain node count, < 0 fail
*/
int deleteAll(ChainList *pList, void *data);
/** pop up a chain nodes from chain head
* parameters:
* pList: the chain list
* return: the head node, return NULL when the chain is empty
*/
void *chain_pop_head(ChainList *pList);
/** add a chain nodes before the chain head
* parameters:
* pList: the chain list
* data: the node to insert
* return: error no, 0 for success, != 0 fail
*/
int insertNodePrior(ChainList *pList, void *data);
/** add a chain nodes after the chain tail
* parameters:
* pList: the chain list
* data: the node to insert
* return: error no, 0 for success, != 0 fail
*/
int appendNode(ChainList *pList, void *data);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,154 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <ctype.h>
#include <unistd.h>
#include <errno.h>
#include "logger.h"
#include "shared_func.h"
#include "char_convert_loader.h"
int char_convert_loader_init(FastCharConverter *pCharConverter,
const IniItem *items, const int count)
{
char_converter_init(pCharConverter, NULL, 0);
return char_convert_loader_add(pCharConverter, items, count);
}
int char_convert_loader_add(FastCharConverter *pCharConverter,
const IniItem *items, const int count)
{
const IniItem *pItem;
const IniItem *pEnd;
int result;
pEnd = items + count;
for (pItem=items; pItem<pEnd; pItem++) {
result = char_convert_loader_set_pair(pCharConverter,
pItem->name, pItem->value);
if (result != 0) {
return result;
}
}
return 0;
}
static int char_convert_loader_parse(const char *s, unsigned char *out_char)
{
int len;
len = strlen(s);
if (len == 1) {
*out_char = *s;
return 0;
}
if (*s != '\\') {
logError("file: "__FILE__", line: %d, "
"invalid char string: %s", __LINE__, s);
return EINVAL;
}
if (len == 2) {
switch (s[1]) {
case '0':
*out_char = '\0';
break;
case 'a':
*out_char = '\a';
break;
case 'b':
*out_char = '\b';
break;
case 't':
*out_char = '\t';
break;
case 'n':
*out_char = '\n';
break;
case 'v':
*out_char = '\v';
break;
case 'f':
*out_char = '\f';
break;
case 'r':
*out_char = '\r';
break;
case 's':
*out_char = ' ';
break;
case '\\':
*out_char = '\\';
break;
default:
logError("file: "__FILE__", line: %d, "
"invalid char string: %s", __LINE__, s);
return EINVAL;
}
return 0;
}
if (len != 4 || s[1] != 'x' || !isxdigit(s[2]) || !isxdigit(s[3])) {
logError("file: "__FILE__", line: %d, "
"invalid char string: %s, correct format: \\x##, "
"## for hex digital. eg. \\x20 for the SPACE char",
__LINE__, s);
return EINVAL;
}
*out_char = (unsigned char)strtol(s+2, NULL, 16);
return 0;
}
int char_convert_loader_set_pair(FastCharConverter *pCharConverter,
const char *src, const char *dest)
{
unsigned char src_char;
unsigned char dest_char;
int result;
if (src == NULL || *src == '\0') {
logError("file: "__FILE__", line: %d, "
"empty src string", __LINE__);
return EINVAL;
}
if (dest == NULL || *dest == '\0') {
logError("file: "__FILE__", line: %d, "
"empty dest string, src string: %s",
__LINE__, src);
return EINVAL;
}
if ((result=char_convert_loader_parse(src, &src_char)) != 0) {
return result;
}
if (*dest == '"') {
if (strlen(dest) != 4 || dest[1] != '\\' || dest[3] != '"') {
logError("file: "__FILE__", line: %d, "
"invalid dest string: %s, correct format: \"\\c\", "
"eg. \"\\t\"", __LINE__, src);
return EINVAL;
}
dest_char = dest[2];
char_converter_set_pair_ex(pCharConverter,
src_char, FAST_CHAR_OP_ADD_BACKSLASH, dest_char);
return 0;
}
if ((result=char_convert_loader_parse(dest, &dest_char)) != 0) {
return result;
}
char_converter_set_pair_ex(pCharConverter,
src_char, FAST_CHAR_OP_NO_BACKSLASH, dest_char);
return 0;
}

View File

@ -0,0 +1,72 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//char_convert_loader.h
#ifndef CHAR_CONVERT_LOADER_H
#define CHAR_CONVERT_LOADER_H
#include <syslog.h>
#include <sys/time.h>
#include "common_define.h"
#include "ini_file_reader.h"
#include "char_converter.h"
#ifdef __cplusplus
extern "C" {
#endif
/**
* char converter init function
* parameters:
* pCharConverter: the char converter
* items: the char key value pairs
* count: the count of kv pairs
* return: 0 for success, != 0 fail
*/
int char_convert_loader_init(FastCharConverter *pCharConverter,
const IniItem *items, const int count);
/**
* char converter init function
* parameters:
* pCharConverter: the char converter
* items: the char key value pairs
* count: the count of kv pairs
* return: 0 for success, != 0 fail
*/
int char_convert_loader_add(FastCharConverter *pCharConverter,
const IniItem *items, const int count);
/**
* set char src and dest pair
* parameters:
* pCharConverter: the char converter
* src: the src string to parse
* dest: the dest string to parse
*
* Note:
* src and dest can be ASCII code as \x##, ## for hex digital,
* such as \x20 for the SPACE char
*
* dest can be a printable char, ASCII code as \x##,
* or quoted two chars as backslash follow by a char, such as "\t"
*
* extended backslash char pairs:
* \0 for the ASCII 0 character
* \s for the SPACE character
* return: 0 for success, != 0 fail
*/
int char_convert_loader_set_pair(FastCharConverter *pCharConverter,
const char *src, const char *dest);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,164 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <unistd.h>
#include <errno.h>
#include "logger.h"
#include "shared_func.h"
#include "char_converter.h"
int char_converter_init_ex(FastCharConverter *pCharConverter,
const FastCharPair *charPairs, const int count,
const unsigned op)
{
int i;
unsigned char src;
if (count > FAST_MAX_CHAR_COUNT)
{
logError("file: "__FILE__", line: %d, "
"count: %d is too large, exceeds %d!", __LINE__,
count, FAST_MAX_CHAR_COUNT);
return EINVAL;
}
memset(pCharConverter, 0, sizeof(FastCharConverter));
pCharConverter->count = count;
for (i=0; i<count; i++)
{
src = charPairs[i].src;
pCharConverter->char_table[src].op = op;
pCharConverter->char_table[src].dest = charPairs[i].dest;
}
return 0;
}
int std_space_char_converter_init(FastCharConverter *pCharConverter,
const unsigned char dest_base)
{
#define SPACE_CHAR_PAIR_COUNT1 7
int i;
FastCharPair pairs[SPACE_CHAR_PAIR_COUNT1];
pairs[0].src = '\0';
pairs[1].src = '\t';
pairs[2].src = '\n';
pairs[3].src = '\v';
pairs[4].src = '\f';
pairs[5].src = '\r';
pairs[6].src = ' ';
for (i=0; i<SPACE_CHAR_PAIR_COUNT1; i++) {
pairs[i].dest = dest_base + i;
}
return char_converter_init(pCharConverter, pairs, SPACE_CHAR_PAIR_COUNT1);
}
int std_spaces_add_backslash_converter_init(FastCharConverter *pCharConverter)
{
#define SPACE_CHAR_PAIR_COUNT2 8
FastCharPair pairs[SPACE_CHAR_PAIR_COUNT2];
pairs[0].src = '\0'; pairs[0].dest = '0';
pairs[1].src = '\t'; pairs[1].dest = 't';
pairs[2].src = '\n'; pairs[2].dest = 'n';
pairs[3].src = '\v'; pairs[3].dest = 'v';
pairs[4].src = '\f'; pairs[4].dest = 'f';
pairs[5].src = '\r'; pairs[5].dest = 'r';
pairs[6].src = ' '; pairs[6].dest = '-';
pairs[7].src = '\\'; pairs[7].dest = '\\';
return char_converter_init_ex(pCharConverter, pairs,
SPACE_CHAR_PAIR_COUNT2, FAST_CHAR_OP_ADD_BACKSLASH);
}
void char_converter_set_pair(FastCharConverter *pCharConverter,
const unsigned char src, const unsigned char dest)
{
char_converter_set_pair_ex(pCharConverter, src,
FAST_CHAR_OP_NO_BACKSLASH, dest);
}
void char_converter_set_pair_ex(FastCharConverter *pCharConverter,
const unsigned char src, const unsigned op, const unsigned char dest)
{
if (op == FAST_CHAR_OP_NONE) {
if (pCharConverter->char_table[src].op != FAST_CHAR_OP_NONE) {
--pCharConverter->count;
}
} else {
if (pCharConverter->char_table[src].op == FAST_CHAR_OP_NONE) {
++pCharConverter->count;
}
}
pCharConverter->char_table[src].op = op;
pCharConverter->char_table[src].dest = dest;
}
int fast_char_convert(FastCharConverter *pCharConverter,
const char *input, const int input_len,
char *output, int *out_len, const int out_size)
{
int count;
unsigned char *pi;
unsigned char *po;
unsigned char *end;
int out_size_sub1;
count = 0;
po = (unsigned char *)output;
if (out_size >= input_len) {
end = (unsigned char *)input + input_len;
} else {
end = (unsigned char *)input + out_size;
}
for (pi=(unsigned char *)input; pi<end; pi++) {
if (pCharConverter->char_table[*pi].op != FAST_CHAR_OP_NONE) {
if (pCharConverter->char_table[*pi].op == FAST_CHAR_OP_ADD_BACKSLASH) {
break;
}
*po++ = pCharConverter->char_table[*pi].dest;
++count;
} else {
*po++ = *pi;
}
}
if (pi == end) {
*out_len = po - (unsigned char *)output;
return count;
}
out_size_sub1 = out_size - 1;
for (; pi<end; pi++) {
if (po - (unsigned char *)output >= out_size_sub1) {
logDebug("file: "__FILE__", line: %d, "
"exceeds max size: %d", __LINE__, out_size);
break;
}
if (pCharConverter->char_table[*pi].op != FAST_CHAR_OP_NONE) {
if (pCharConverter->char_table[*pi].op == FAST_CHAR_OP_ADD_BACKSLASH) {
*po++ = '\\';
}
*po++ = pCharConverter->char_table[*pi].dest;
++count;
} else {
*po++ = *pi;
}
}
*out_len = po - (unsigned char *)output;
return count;
}

View File

@ -0,0 +1,141 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//char_converter.h
#ifndef CHAR_CONVERTER_H
#define CHAR_CONVERTER_H
#include <syslog.h>
#include <sys/time.h>
#include "common_define.h"
#ifdef __cplusplus
extern "C" {
#endif
#define FAST_MAX_CHAR_COUNT 256
#define FAST_CHAR_OP_NONE 0
#define FAST_CHAR_OP_ADD_BACKSLASH 1
#define FAST_CHAR_OP_NO_BACKSLASH 2
typedef struct fast_char_pair
{
unsigned char src;
unsigned char dest;
} FastCharPair;
typedef struct fast_char_target
{
unsigned char op;
unsigned char dest;
} FastCharTarget;
typedef struct fast_char_converter
{
/*
* char pairs count
* */
int count;
/*
* char table to convert
* */
FastCharTarget char_table[FAST_MAX_CHAR_COUNT];
} FastCharConverter;
/**
* char converter init function
* parameters:
* pCharConverter: the char converter
* charPairs: the char pairs
* count: the count of char pairs
* op: the operator type
* return: 0 for success, != 0 fail
*/
int char_converter_init_ex(FastCharConverter *pCharConverter,
const FastCharPair *charPairs, const int count,
const unsigned op);
/**
* char converter init function
* parameters:
* pCharConverter: the char converter
* charPairs: the char pairs
* count: the count of char pairs
* return: 0 for success, != 0 fail
*/
static inline int char_converter_init(FastCharConverter *pCharConverter,
const FastCharPair *charPairs, const int count)
{
return char_converter_init_ex(pCharConverter, charPairs, count,
FAST_CHAR_OP_NO_BACKSLASH);
}
/**
* standard space chars to convert
* parameters:
* pCharConverter: the char converter
* dest_base: the dest base char
* return: 0 for success, != 0 fail
*/
int std_space_char_converter_init(FastCharConverter *pCharConverter,
const unsigned char dest_base);
/**
* standard space chars init to add backslash
* parameters:
* pCharConverter: the char converter
* return: 0 for success, != 0 fail
*/
int std_spaces_add_backslash_converter_init(FastCharConverter *pCharConverter);
/**
* set char pair to converter
* parameters:
* pCharConverter: the char converter
* src: the src char
* dest: the dest char
* return: none
*/
void char_converter_set_pair(FastCharConverter *pCharConverter,
const unsigned char src, const unsigned char dest);
/**
* set char pair to converter
* parameters:
* pCharConverter: the char converter
* src: the src char
* op: the operator type
* dest: the dest char
* return: none
*/
void char_converter_set_pair_ex(FastCharConverter *pCharConverter,
const unsigned char src, const unsigned op, const unsigned char dest);
/**
* char convert function
* parameters:
* pCharConverter: the char converter
* input: the input to convert
* input_len: the length of input
* output: the input to convert
* out_len: the length of output
* out_size: output buff size
* return: converted char count
*/
int fast_char_convert(FastCharConverter *pCharConverter,
const char *input, const int input_len,
char *output, int *out_len, const int out_size);
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,163 @@
//common_blocked_queue.c
#include <errno.h>
#include <pthread.h>
#include <inttypes.h>
#include "logger.h"
#include "shared_func.h"
#include "pthread_func.h"
#include "common_blocked_queue.h"
int common_blocked_queue_init_ex(struct common_blocked_queue *queue,
const int alloc_elements_once)
{
int result;
if ((result=init_pthread_lock(&(queue->lock))) != 0)
{
logError("file: "__FILE__", line: %d, "
"init_pthread_lock fail, errno: %d, error info: %s",
__LINE__, result, STRERROR(result));
return result;
}
result = pthread_cond_init(&(queue->cond), NULL);
if (result != 0)
{
logError("file: "__FILE__", line: %d, "
"pthread_cond_init fail, "
"errno: %d, error info: %s",
__LINE__, result, STRERROR(result));
return result;
}
if ((result=fast_mblock_init_ex(&queue->mblock,
sizeof(struct common_blocked_node),
alloc_elements_once, NULL, NULL, false)) != 0)
{
return result;
}
queue->head = NULL;
queue->tail = NULL;
return 0;
}
void common_blocked_queue_destroy(struct common_blocked_queue *queue)
{
pthread_cond_destroy(&(queue->cond));
pthread_mutex_destroy(&(queue->lock));
}
int common_blocked_queue_push(struct common_blocked_queue *queue, void *data)
{
int result;
struct common_blocked_node *node;
bool notify;
if ((result=pthread_mutex_lock(&(queue->lock))) != 0)
{
logError("file: "__FILE__", line: %d, " \
"call pthread_mutex_lock fail, " \
"errno: %d, error info: %s", \
__LINE__, result, STRERROR(result));
return result;
}
node = (struct common_blocked_node *)fast_mblock_alloc_object(&queue->mblock);
if (node == NULL)
{
pthread_mutex_unlock(&(queue->lock));
return ENOMEM;
}
node->data = data;
node->next = NULL;
if (queue->tail == NULL)
{
queue->head = node;
notify = true;
}
else
{
queue->tail->next = node;
notify = false;
}
queue->tail = node;
if ((result=pthread_mutex_unlock(&(queue->lock))) != 0)
{
logError("file: "__FILE__", line: %d, " \
"call pthread_mutex_unlock fail, " \
"errno: %d, error info: %s", \
__LINE__, result, STRERROR(result));
}
if (notify)
{
pthread_cond_signal(&(queue->cond));
}
return 0;
}
void *common_blocked_queue_pop_ex(struct common_blocked_queue *queue,
const bool blocked)
{
struct common_blocked_node *node;
void *data;
int result;
if ((result=pthread_mutex_lock(&(queue->lock))) != 0)
{
logError("file: "__FILE__", line: %d, " \
"call pthread_mutex_lock fail, " \
"errno: %d, error info: %s", \
__LINE__, result, STRERROR(result));
return NULL;
}
do {
node = queue->head;
if (node == NULL)
{
if (!blocked)
{
data = NULL;
break;
}
pthread_cond_wait(&(queue->cond), &(queue->lock));
node = queue->head;
}
if (node != NULL)
{
queue->head = node->next;
if (queue->head == NULL)
{
queue->tail = NULL;
}
data = node->data;
fast_mblock_free_object(&queue->mblock, node);
}
else
{
data = NULL;
}
} while (0);
if ((result=pthread_mutex_unlock(&(queue->lock))) != 0)
{
logError("file: "__FILE__", line: %d, " \
"call pthread_mutex_unlock fail, " \
"errno: %d, error info: %s", \
__LINE__, result, STRERROR(result));
}
return data;
}

View File

@ -0,0 +1,82 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//common_blocked_queue.h
#ifndef _COMMON_BLOCKED_QUEUE_H
#define _COMMON_BLOCKED_QUEUE_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "common_define.h"
#include "fast_mblock.h"
struct common_blocked_node
{
void *data;
struct common_blocked_node *next;
};
struct common_blocked_queue
{
struct common_blocked_node *head;
struct common_blocked_node *tail;
struct fast_mblock_man mblock;
pthread_mutex_t lock;
pthread_cond_t cond;
};
#ifdef __cplusplus
extern "C" {
#endif
int common_blocked_queue_init_ex(struct common_blocked_queue *queue,
const int alloc_elements_once);
#define common_blocked_queue_init(queue) \
common_blocked_queue_init_ex(queue, 1024)
void common_blocked_queue_destroy(struct common_blocked_queue *queue);
static inline void common_blocked_queue_terminate(struct common_blocked_queue *queue)
{
pthread_cond_signal(&(queue->cond));
}
static inline void common_blocked_queue_terminate_all(
struct common_blocked_queue *queue, const int count)
{
int i;
for (i=0; i<count; i++)
{
pthread_cond_signal(&(queue->cond));
}
}
int common_blocked_queue_push(struct common_blocked_queue *queue, void *data);
void *common_blocked_queue_pop_ex(struct common_blocked_queue *queue,
const bool blocked);
static inline void *common_blocked_queue_pop(struct common_blocked_queue *queue)
{
return common_blocked_queue_pop_ex(queue, true);
}
static inline void *common_blocked_queue_try_pop(struct common_blocked_queue *queue)
{
return common_blocked_queue_pop_ex(queue, false);
}
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,296 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//common_define.h
#ifndef _COMMON_DEFINE_H_
#define _COMMON_DEFINE_H_
#include <pthread.h>
#include <string.h>
#include <errno.h>
#ifdef WIN32
#include <windows.h>
#include <winsock.h>
typedef UINT in_addr_t;
#define FILE_SEPERATOR "\\"
#define THREAD_ENTRANCE_FUNC_DECLARE DWORD WINAPI
#define THREAD_RETURN_VALUE 0
typedef DWORD (WINAPI *ThreadEntranceFunc)(LPVOID lpThreadParameter);
#else
#include <unistd.h>
#include <signal.h>
#include <stdbool.h>
#include <inttypes.h>
#include <netinet/in.h>
#include <arpa/inet.h>
#include <sys/types.h>
#include <sys/socket.h>
#define FILE_SEPERATOR "/"
typedef int SOCKET;
#define closesocket close
#define INVALID_SOCKET -1
#define THREAD_ENTRANCE_FUNC_DECLARE void *
typedef void *LPVOID;
#define THREAD_RETURN_VALUE NULL
typedef void * (*ThreadEntranceFunc)(LPVOID lpThreadParameter);
#endif
#ifndef WIN32
extern int pthread_mutexattr_settype(pthread_mutexattr_t *attr, int kind);
#endif
#include "_os_define.h"
#ifdef OS_LINUX
#ifndef PTHREAD_MUTEX_ERRORCHECK
#define PTHREAD_MUTEX_ERRORCHECK PTHREAD_MUTEX_ERRORCHECK_NP
#endif
#endif
#ifdef OS_BITS
#if OS_BITS == 64
#define INT64_PRINTF_FORMAT "%ld"
#else
#define INT64_PRINTF_FORMAT "%lld"
#endif
#else
#define INT64_PRINTF_FORMAT "%lld"
#endif
#ifdef OFF_BITS
#if OFF_BITS == 64
#define OFF_PRINTF_FORMAT INT64_PRINTF_FORMAT
#else
#define OFF_PRINTF_FORMAT "%d"
#endif
#else
#define OFF_PRINTF_FORMAT INT64_PRINTF_FORMAT
#endif
#ifndef WIN32
#define USE_SENDFILE
#endif
#define MAX_PATH_SIZE 256
#define LOG_FILE_DIR "logs"
#define CONF_FILE_DIR "conf"
#define DEFAULT_CONNECT_TIMEOUT 10
#define DEFAULT_NETWORK_TIMEOUT 30
#define DEFAULT_MAX_CONNECTONS 1024
#define DEFAULT_WORK_THREADS 4
#define SYNC_LOG_BUFF_DEF_INTERVAL 10
#define TIME_NONE -1
#define IP_ADDRESS_SIZE 16
#define INFINITE_FILE_SIZE (256 * 1024LL * 1024 * 1024 * 1024 * 1024LL)
#ifndef byte
#define byte signed char
#endif
#ifndef ubyte
#define ubyte unsigned char
#endif
#ifndef WIN32
#ifndef INADDR_NONE
#define INADDR_NONE ((in_addr_t) 0xffffffff)
#endif
#endif
#ifndef ECANCELED
#define ECANCELED 125
#endif
#ifndef ENONET
#define ENONET 64 /* Machine is not on the network */
#endif
#define IS_UPPER_HEX(ch) ((ch >= '0' && ch <= '9') || (ch >= 'A' && ch <= 'F'))
#define IS_HEX_CHAR(ch) (IS_UPPER_HEX(ch) || (ch >= 'a' && ch <= 'f'))
#define FC_IS_DIGITAL(ch) (ch >= '0' && ch <= '9')
#define FC_IS_LETTER(ch) ((ch >= 'A' && ch <= 'Z') || (ch >= 'a' && ch <= 'z'))
#define FC_IS_UPPER_LETTER(ch) (ch >= 'A' && ch <= 'Z')
#define FC_IS_LOWER_LETTER(ch) (ch >= 'a' && ch <= 'z')
#define STRERROR(no) (strerror(no) != NULL ? strerror(no) : "Unkown error")
#if defined(OS_LINUX)
#if defined __USE_MISC || defined __USE_XOPEN2K8
#define st_atimensec st_atim.tv_nsec
#define st_mtimensec st_mtim.tv_nsec
#define st_ctimensec st_ctim.tv_nsec
#endif
#elif defined(OS_FREEBSD)
#define st_atimensec st_atimespec.tv_nsec
#define st_mtimensec st_mtimespec.tv_nsec
#define st_ctimensec st_ctimespec.tv_nsec
#endif
#ifdef __cplusplus
extern "C" {
#endif
typedef struct
{
byte hour;
byte minute;
byte second;
} TimeInfo;
typedef struct
{
char major;
char minor;
char patch;
} Version;
typedef struct
{
char *key;
char *value;
} KeyValuePair;
typedef struct
{
char *key;
char *value;
int key_len;
int value_len;
} KeyValuePairEx;
typedef struct
{
char *buff;
int alloc_size;
int length;
} BufferInfo;
typedef struct
{
char *str;
int len;
} string_t;
typedef struct
{
string_t *strings;
int count;
} string_array_t;
typedef struct
{
string_t key;
string_t value;
} key_value_pair_t;
typedef struct
{
key_value_pair_t *kv_pairs;
int count;
} key_value_array_t;
typedef void (*FreeDataFunc)(void *ptr);
typedef int (*CompareFunc)(void *p1, void *p2);
typedef void* (*MallocFunc)(size_t size);
#define TO_UPPERCASE(c) (((c) >= 'a' && (c) <= 'z') ? (c) - 32 : c)
#define MEM_ALIGN(x) (((x) + 7) & (~7))
#ifdef WIN32
#define strcasecmp _stricmp
#endif
#ifndef likely
#if defined(__GNUC__) && __GNUC__ >= 3
#define likely(cond) __builtin_expect ((cond), 1)
#define unlikely(cond) __builtin_expect ((cond), 0)
#else
#define likely(cond) (cond)
#define unlikely(cond) (cond)
#endif
#endif
#ifdef __GNUC__
#define __gcc_attribute__ __attribute__
#else
#define __gcc_attribute__(x)
#endif
#define FC_IS_CHINESE_UTF8_CHAR(p, end) \
((p + 2 < end) && \
((((unsigned char)*p) & 0xF0) == 0xE0) && \
((((unsigned char)*(p + 1)) & 0xC0) == 0x80) && \
((((unsigned char)*(p + 2)) & 0xC0) == 0x80))
//for printf format %.*s
#define FC_PRINTF_STAR_STRING_PARAMS(s) (s).len, (s).str
#define FC_SET_STRING(dest, src) \
do { \
(dest).str = src; \
(dest).len = strlen(src); \
} while (0)
#define FC_SET_STRING_EX(dest, s, l) \
do { \
(dest).str = s; \
(dest).len = l; \
} while (0)
#define FC_SET_STRING_NULL(dest) \
do { \
(dest).str = NULL; \
(dest).len = 0; \
} while (0)
#define FC_IS_NULL_STRING(s) ((s)->str == NULL)
#define FC_IS_EMPTY_STRING(s) ((s)->len == 0)
#define fc_compare_string(s1, s2) fc_string_compare(s1, s2)
static inline int fc_string_compare(const string_t *s1, const string_t *s2)
{
int result;
if (s1->len == s2->len) {
return memcmp(s1->str, s2->str, s1->len);
} else if (s1->len < s2->len) {
result = memcmp(s1->str, s2->str, s1->len);
return result == 0 ? -1 : result;
} else {
result = memcmp(s1->str, s2->str, s2->len);
return result == 0 ? 1 : result;
}
}
static inline bool fc_string_equal(const string_t *s1, const string_t *s2)
{
return (s1->len == s2->len) && (memcmp(s1->str, s2->str, s1->len) == 0);
}
static inline bool fc_string_equal2(const string_t *s1,
const char *str2, const int len2)
{
return (s1->len == len2) && (memcmp(s1->str, str2, s1->len) == 0);
}
#define fc_string_equals(s1, s2) fc_string_equal(s1, s2)
#define fc_string_equals2(s1, str2, len2) fc_string_equal2(s1, str2, len2)
#ifdef __cplusplus
}
#endif
#endif

View File

@ -0,0 +1,461 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
#include <netdb.h>
#include <unistd.h>
#include <errno.h>
#include "logger.h"
#include "sockopt.h"
#include "shared_func.h"
#include "sched_thread.h"
#include "connection_pool.h"
int conn_pool_init_ex(ConnectionPool *cp, int connect_timeout, \
const int max_count_per_entry, const int max_idle_time,
const int socket_domain)
{
int result;
if ((result=init_pthread_lock(&cp->lock)) != 0)
{
return result;
}
cp->connect_timeout = connect_timeout;
cp->max_count_per_entry = max_count_per_entry;
cp->max_idle_time = max_idle_time;
cp->socket_domain = socket_domain;
return hash_init(&(cp->hash_array), simple_hash, 1024, 0.75);
}
int conn_pool_init(ConnectionPool *cp, int connect_timeout,
const int max_count_per_entry, const int max_idle_time)
{
const int socket_domain = AF_INET;
return conn_pool_init_ex(cp, connect_timeout, max_count_per_entry,
max_idle_time, socket_domain);
}
int coon_pool_close_connections(const int index, const HashData *data, void *args)
{
ConnectionManager *cm;
cm = (ConnectionManager *)data->value;
if (cm != NULL)
{
ConnectionNode *node;
ConnectionNode *deleted;
node = cm->head;
while (node != NULL)
{
deleted = node;
node = node->next;
conn_pool_disconnect_server(deleted->conn);
free(deleted);
}
free(cm);
}
return 0;
}
void conn_pool_destroy(ConnectionPool *cp)
{
pthread_mutex_lock(&cp->lock);
hash_walk(&(cp->hash_array), coon_pool_close_connections, NULL);
hash_destroy(&(cp->hash_array));
pthread_mutex_unlock(&cp->lock);
pthread_mutex_destroy(&cp->lock);
}
void conn_pool_disconnect_server(ConnectionInfo *pConnection)
{
if (pConnection->sock >= 0)
{
close(pConnection->sock);
pConnection->sock = -1;
}
}
int conn_pool_connect_server_ex(ConnectionInfo *pConnection,
const int connect_timeout, const char *bind_ipaddr,
const bool log_connect_error)
{
int result;
int domain;
if (pConnection->sock >= 0)
{
close(pConnection->sock);
}
if (pConnection->socket_domain == AF_INET ||
pConnection->socket_domain == AF_INET6)
{
domain = pConnection->socket_domain;
}
else
{
domain = is_ipv6_addr(pConnection->ip_addr) ? AF_INET6 : AF_INET;
}
pConnection->sock = socket(domain, SOCK_STREAM, 0);
if(pConnection->sock < 0)
{
logError("file: "__FILE__", line: %d, "
"socket create fail, errno: %d, "
"error info: %s", __LINE__, errno, STRERROR(errno));
return errno != 0 ? errno : EPERM;
}
if (bind_ipaddr != NULL && *bind_ipaddr != '\0')
{
if ((result=socketBind2(domain, pConnection->sock, bind_ipaddr, 0)) != 0)
{
return result;
}
}
SET_SOCKOPT_NOSIGPIPE(pConnection->sock);
if ((result=tcpsetnonblockopt(pConnection->sock)) != 0)
{
close(pConnection->sock);
pConnection->sock = -1;
return result;
}
if ((result=connectserverbyip_nb(pConnection->sock,
pConnection->ip_addr, pConnection->port,
connect_timeout)) != 0)
{
if (log_connect_error)
{
logError("file: "__FILE__", line: %d, "
"connect to server %s:%d fail, errno: %d, "
"error info: %s", __LINE__, pConnection->ip_addr,
pConnection->port, result, STRERROR(result));
}
close(pConnection->sock);
pConnection->sock = -1;
return result;
}
return 0;
}
static inline void conn_pool_get_key(const ConnectionInfo *conn, char *key, int *key_len)
{
*key_len = sprintf(key, "%s_%d", conn->ip_addr, conn->port);
}
ConnectionInfo *conn_pool_get_connection(ConnectionPool *cp,
const ConnectionInfo *conn, int *err_no)
{
char key[INET6_ADDRSTRLEN + 8];
int key_len;
int bytes;
char *p;
ConnectionManager *cm;
ConnectionNode *node;
ConnectionInfo *ci;
time_t current_time;
conn_pool_get_key(conn, key, &key_len);
pthread_mutex_lock(&cp->lock);
cm = (ConnectionManager *)hash_find(&cp->hash_array, key, key_len);
if (cm == NULL)
{
cm = (ConnectionManager *)malloc(sizeof(ConnectionManager));
if (cm == NULL)
{
*err_no = errno != 0 ? errno : ENOMEM;
logError("file: "__FILE__", line: %d, " \
"malloc %d bytes fail, errno: %d, " \
"error info: %s", __LINE__, \
(int)sizeof(ConnectionManager), \
*err_no, STRERROR(*err_no));
pthread_mutex_unlock(&cp->lock);
return NULL;
}
cm->head = NULL;
cm->total_count = 0;
cm->free_count = 0;
if ((*err_no=init_pthread_lock(&cm->lock)) != 0)
{
pthread_mutex_unlock(&cp->lock);
return NULL;
}
hash_insert(&cp->hash_array, key, key_len, cm);
}
pthread_mutex_unlock(&cp->lock);
current_time = get_current_time();
pthread_mutex_lock(&cm->lock);
while (1)
{
if (cm->head == NULL)
{
if ((cp->max_count_per_entry > 0) &&
(cm->total_count >= cp->max_count_per_entry))
{
*err_no = ENOSPC;
logError("file: "__FILE__", line: %d, " \
"connections: %d of server %s:%d " \
"exceed limit: %d", __LINE__, \
cm->total_count, conn->ip_addr, \
conn->port, cp->max_count_per_entry);
pthread_mutex_unlock(&cm->lock);
return NULL;
}
bytes = sizeof(ConnectionNode) + sizeof(ConnectionInfo);
p = (char *)malloc(bytes);
if (p == NULL)
{
*err_no = errno != 0 ? errno : ENOMEM;
logError("file: "__FILE__", line: %d, " \
"malloc %d bytes fail, errno: %d, " \
"error info: %s", __LINE__, \
bytes, *err_no, STRERROR(*err_no));
pthread_mutex_unlock(&cm->lock);
return NULL;
}
node = (ConnectionNode *)p;
node->conn = (ConnectionInfo *)(p + sizeof(ConnectionNode));
node->manager = cm;
node->next = NULL;
node->atime = 0;
cm->total_count++;
pthread_mutex_unlock(&cm->lock);
memcpy(node->conn, conn, sizeof(ConnectionInfo));
node->conn->socket_domain = cp->socket_domain;
node->conn->sock = -1;
*err_no = conn_pool_connect_server(node->conn, \
cp->connect_timeout);
if (*err_no != 0)
{
pthread_mutex_lock(&cm->lock);
cm->total_count--; //rollback
pthread_mutex_unlock(&cm->lock);
free(p);
return NULL;
}
logDebug("file: "__FILE__", line: %d, " \
"server %s:%d, new connection: %d, " \
"total_count: %d, free_count: %d", \
__LINE__, conn->ip_addr, conn->port, \
node->conn->sock, cm->total_count, \
cm->free_count);
return node->conn;
}
else
{
node = cm->head;
ci = node->conn;
cm->head = node->next;
cm->free_count--;
if (current_time - node->atime > cp->max_idle_time)
{
cm->total_count--;
logDebug("file: "__FILE__", line: %d, " \
"server %s:%d, connection: %d idle " \
"time: %d exceeds max idle time: %d, "\
"total_count: %d, free_count: %d", \
__LINE__, conn->ip_addr, conn->port, \
ci->sock, \
(int)(current_time - node->atime), \
cp->max_idle_time, cm->total_count, \
cm->free_count);
conn_pool_disconnect_server(ci);
free(node);
continue;
}
pthread_mutex_unlock(&cm->lock);
logDebug("file: "__FILE__", line: %d, " \
"server %s:%d, reuse connection: %d, " \
"total_count: %d, free_count: %d",
__LINE__, conn->ip_addr, conn->port,
ci->sock, cm->total_count, cm->free_count);
*err_no = 0;
return ci;
}
}
}
int conn_pool_close_connection_ex(ConnectionPool *cp, ConnectionInfo *conn,
const bool bForce)
{
char key[INET6_ADDRSTRLEN + 8];
int key_len;
ConnectionManager *cm;
ConnectionNode *node;
conn_pool_get_key(conn, key, &key_len);
pthread_mutex_lock(&cp->lock);
cm = (ConnectionManager *)hash_find(&cp->hash_array, key, key_len);
pthread_mutex_unlock(&cp->lock);
if (cm == NULL)
{
logError("file: "__FILE__", line: %d, " \
"hash entry of server %s:%d not exist", __LINE__, \
conn->ip_addr, conn->port);
return ENOENT;
}
node = (ConnectionNode *)(((char *)conn) - sizeof(ConnectionNode));
if (node->manager != cm)
{
logError("file: "__FILE__", line: %d, " \
"manager of server entry %s:%d is invalid!", \
__LINE__, conn->ip_addr, conn->port);
return EINVAL;
}
pthread_mutex_lock(&cm->lock);
if (bForce)
{
cm->total_count--;
logDebug("file: "__FILE__", line: %d, " \
"server %s:%d, release connection: %d, " \
"total_count: %d, free_count: %d",
__LINE__, conn->ip_addr, conn->port,
conn->sock, cm->total_count, cm->free_count);
conn_pool_disconnect_server(conn);
free(node);
}
else
{
node->atime = get_current_time();
node->next = cm->head;
cm->head = node;
cm->free_count++;
logDebug("file: "__FILE__", line: %d, " \
"server %s:%d, free connection: %d, " \
"total_count: %d, free_count: %d",
__LINE__, conn->ip_addr, conn->port,
conn->sock, cm->total_count, cm->free_count);
}
pthread_mutex_unlock(&cm->lock);
return 0;
}
static int _conn_count_walk(const int index, const HashData *data, void *args)
{
int *count;
ConnectionManager *cm;
ConnectionNode *node;
count = (int *)args;
cm = (ConnectionManager *)data->value;
node = cm->head;
while (node != NULL)
{
(*count)++;
node = node->next;
}
return 0;
}
int conn_pool_get_connection_count(ConnectionPool *cp)
{
int count;
count = 0;
hash_walk(&cp->hash_array, _conn_count_walk, &count);
return count;
}
int conn_pool_parse_server_info(const char *pServerStr,
ConnectionInfo *pServerInfo, const int default_port)
{
char *parts[2];
char server_info[256];
int len;
int count;
len = strlen(pServerStr);
if (len == 0) {
logError("file: "__FILE__", line: %d, "
"host \"%s\" is empty!",
__LINE__, pServerStr);
return EINVAL;
}
if (len >= sizeof(server_info)) {
logError("file: "__FILE__", line: %d, "
"host \"%s\" is too long!",
__LINE__, pServerStr);
return ENAMETOOLONG;
}
memcpy(server_info, pServerStr, len);
*(server_info + len) = '\0';
count = splitEx(server_info, ':', parts, 2);
if (count == 1) {
pServerInfo->port = default_port;
}
else {
char *endptr = NULL;
pServerInfo->port = (int)strtol(parts[1], &endptr, 10);
if ((endptr != NULL && *endptr != '\0') || pServerInfo->port <= 0) {
logError("file: "__FILE__", line: %d, "
"host: %s, invalid port: %s!",
__LINE__, pServerStr, parts[1]);
return EINVAL;
}
}
if (getIpaddrByName(parts[0], pServerInfo->ip_addr,
sizeof(pServerInfo->ip_addr)) == INADDR_NONE)
{
logError("file: "__FILE__", line: %d, "
"host: %s, invalid hostname: %s!",
__LINE__, pServerStr, parts[0]);
return EINVAL;
}
pServerInfo->socket_domain = AF_INET;
pServerInfo->sock = -1;
return 0;
}
int conn_pool_load_server_info(IniContext *pIniContext, const char *filename,
const char *item_name, ConnectionInfo *pServerInfo,
const int default_port)
{
char *pServerStr;
pServerStr = iniGetStrValue(NULL, item_name, pIniContext);
if (pServerStr == NULL) {
logError("file: "__FILE__", line: %d, "
"config file: %s, item \"%s\" not exist!",
__LINE__, filename, item_name);
return ENOENT;
}
return conn_pool_parse_server_info(pServerStr, pServerInfo, default_port);
}
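For illustration, a minimal sketch of how the two parsing helpers above are typically driven; the address 192.168.0.196 and port 22122 are placeholder values and error handling is trimmed to the essentials:

#include <stdio.h>
#include "connection_pool.h"

static int parse_demo(void)
{
    ConnectionInfo server;
    int result;

    /* accepts "host" or "host:port"; the default port 22122 is used
       when the ":port" suffix is omitted */
    if ((result=conn_pool_parse_server_info("192.168.0.196", &server, 22122)) != 0)
    {
        return result;
    }
    printf("server %s:%d\n", server.ip_addr, server.port);
    return 0;
}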

View File

@ -0,0 +1,242 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//connection_pool.h
#ifndef _CONNECTION_POOL_H
#define _CONNECTION_POOL_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include "common_define.h"
#include "pthread_func.h"
#include "hash.h"
#include "ini_file_reader.h"
#ifdef __cplusplus
extern "C" {
#endif
#define FC_CONNECTION_SERVER_EQUAL(conn, target_ip, target_port) \
(strcmp((conn).ip_addr, target_ip) == 0 && \
(conn).port == target_port)
#define FC_CONNECTION_SERVER_EQUAL1(conn1, conn2) \
(strcmp((conn1).ip_addr, (conn2).ip_addr) == 0 && \
(conn1).port == (conn2).port)
typedef struct
{
int sock;
int port;
char ip_addr[INET6_ADDRSTRLEN];
int socket_domain; //socket domain: AF_INET, AF_INET6 or AF_UNSPEC for auto detect
} ConnectionInfo;
struct tagConnectionManager;
typedef struct tagConnectionNode {
ConnectionInfo *conn;
struct tagConnectionManager *manager;
struct tagConnectionNode *next;
time_t atime; //last access time
} ConnectionNode;
typedef struct tagConnectionManager {
ConnectionNode *head;
int total_count; //total connections
int free_count; //free connections
pthread_mutex_t lock;
} ConnectionManager;
typedef struct tagConnectionPool {
HashArray hash_array; //key is ip:port, value is ConnectionManager
pthread_mutex_t lock;
int connect_timeout;
int max_count_per_entry; //0 means no limit
/*
connections whose idle time exceeds this value will be closed
unit: second
*/
int max_idle_time;
int socket_domain; //socket domain
} ConnectionPool;
/**
* init ex function
* parameters:
* cp: the ConnectionPool
* connect_timeout: the connect timeout in seconds
* max_count_per_entry: max connection count per host:port
* max_idle_time: connections idle longer than this many seconds are closed and re-established on the next request
* socket_domain: the socket domain
* return 0 for success, != 0 for error
*/
int conn_pool_init_ex(ConnectionPool *cp, int connect_timeout,
const int max_count_per_entry, const int max_idle_time,
const int socket_domain);
/**
* init function
* parameters:
* cp: the ConnectionPool
* connect_timeout: the connect timeout in seconds
* max_count_per_entry: max connection count per host:port
* max_idle_time: connections idle longer than this many seconds are closed and re-established on the next request
* return 0 for success, != 0 for error
*/
int conn_pool_init(ConnectionPool *cp, int connect_timeout,
const int max_count_per_entry, const int max_idle_time);
/**
* destroy function
* parameters:
* cp: the ConnectionPool
* return none
**/
void conn_pool_destroy(ConnectionPool *cp);
/**
* get connection from the pool
* parameters:
* cp: the ConnectionPool
* conn: the connection
* err_no: return the errno, 0 for success
* return != NULL for success, NULL for error
*/
ConnectionInfo *conn_pool_get_connection(ConnectionPool *cp,
const ConnectionInfo *conn, int *err_no);
#define conn_pool_close_connection(cp, conn) \
conn_pool_close_connection_ex(cp, conn, false)
/**
* push back the connection to pool
* parameters:
* cp: the ConnectionPool
* conn: the connection
* bForce: set true to close the socket; otherwise the connection is only pushed back into the pool
* return 0 for success, != 0 for error
*/
int conn_pool_close_connection_ex(ConnectionPool *cp, ConnectionInfo *conn,
const bool bForce);
/**
* disconnect from the server
* parameters:
* pConnection: the connection
* return none
*/
void conn_pool_disconnect_server(ConnectionInfo *pConnection);
/**
* connect to the server
* parameters:
* pConnection: the connection
* connect_timeout: the connect timeout in seconds
* bind_ipaddr: the ip address to bind, NULL or empty for any
* log_connect_error: whether to log error info when connect fails
* NOTE: if pConnection->sock >= 0, it will be closed before connecting
* return 0 for success, != 0 for error
*/
int conn_pool_connect_server_ex(ConnectionInfo *pConnection,
const int connect_timeout, const char *bind_ipaddr,
const bool log_connect_error);
/**
* connect to the server
* parameters:
* pConnection: the connection
* connect_timeout: the connect timeout in seconds
* NOTE: if pConnection->sock >= 0, it will be closed before connecting
* return 0 for success, != 0 for error
*/
static inline int conn_pool_connect_server(ConnectionInfo *pConnection,
const int connect_timeout)
{
const char *bind_ipaddr = NULL;
return conn_pool_connect_server_ex(pConnection,
connect_timeout, bind_ipaddr, true);
}
/**
* connect to the server unconditionally: the socket is reset to -1 first, so an existing socket is NOT closed
* parameters:
* pConnection: the connection
* connect_timeout: the connect timeout in seconds
* return 0 for success, != 0 for error
*/
static inline int conn_pool_connect_server_anyway(ConnectionInfo *pConnection,
const int connect_timeout)
{
const char *bind_ipaddr = NULL;
pConnection->sock = -1;
return conn_pool_connect_server_ex(pConnection,
connect_timeout, bind_ipaddr, true);
}
/**
* get connection count of the pool
* parameters:
* cp: the ConnectionPool
* return current connection count
*/
int conn_pool_get_connection_count(ConnectionPool *cp);
/**
* load server info from config file
* parameters:
* pIniContext: the ini context
* filename: the config filename
* item_name: the item name in config file, format item_name=server:port
* pServerInfo: store server info
* default_port: the default port
* return 0 for success, != 0 for error
*/
int conn_pool_load_server_info(IniContext *pIniContext, const char *filename,
const char *item_name, ConnectionInfo *pServerInfo,
const int default_port);
/**
* parse server info from string
* parameters:
* pServerStr: server and port string as server:port
* pServerInfo: store server info
* default_port: the default port
* return 0 for success, != 0 for error
*/
int conn_pool_parse_server_info(const char *pServerStr,
ConnectionInfo *pServerInfo, const int default_port);
/**
* set server info with ip address and port
* parameters:
* pServerInfo: store server info
* ip_addr: the ip address
* port: the port
* return none
*/
static inline void conn_pool_set_server_info(ConnectionInfo *pServerInfo,
const char *ip_addr, const int port)
{
snprintf(pServerInfo->ip_addr, sizeof(pServerInfo->ip_addr),
"%s", ip_addr);
pServerInfo->port = port;
pServerInfo->socket_domain = AF_UNSPEC;
pServerInfo->sock = -1;
}
#ifdef __cplusplus
}
#endif
#endif
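To show how the declarations in this header fit together, here is a minimal lifecycle sketch: initialize the pool, borrow a connection, return it, then destroy the pool. The timeouts and the server address are placeholder values chosen for the example.

#include "connection_pool.h"

static int pool_demo(void)
{
    ConnectionPool pool;
    ConnectionInfo server;
    ConnectionInfo *conn;
    int result;
    int err_no;

    /* 5 second connect timeout, no per-server limit, close after 300s idle */
    if ((result=conn_pool_init(&pool, 5, 0, 300)) != 0)
    {
        return result;
    }

    conn_pool_set_server_info(&server, "192.168.0.196", 22122);
    conn = conn_pool_get_connection(&pool, &server, &err_no);
    if (conn == NULL)
    {
        conn_pool_destroy(&pool);
        return err_no;
    }

    /* ... use conn->sock for request/response I/O here ... */

    conn_pool_close_connection(&pool, conn);  /* push back for later reuse */
    conn_pool_destroy(&pool);
    return 0;
}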

View File

@ -0,0 +1,497 @@
//fast_allocator.c
#include <errno.h>
#include <stdlib.h>
#include <pthread.h>
#include "logger.h"
#include "shared_func.h"
#include "sched_thread.h"
#include "fast_allocator.h"
#define BYTES_ALIGN(x, pad_mask) (((x) + pad_mask) & (~pad_mask))
struct allocator_wrapper {
int alloc_bytes;
short allocator_index;
short magic_number;
};
static struct fast_allocator_info malloc_allocator;
#define ADD_ALLOCATOR_TO_ARRAY(acontext, allocator, _pooled) \
do { \
(allocator)->index = acontext->allocator_array.count; \
(allocator)->magic_number = rand(); \
(allocator)->pooled = _pooled; \
acontext->allocator_array.allocators[ \
acontext->allocator_array.count++] = allocator; \
/* logInfo("count: %d, magic_number: %d", acontext->allocator_array.count, (allocator)->magic_number); */\
} while (0)
static int fast_allocator_malloc_trunk_check(const int alloc_bytes, void *args)
{
struct fast_allocator_context *acontext;
acontext = (struct fast_allocator_context *)args;
if (acontext->alloc_bytes_limit == 0)
{
return 0;
}
if (acontext->alloc_bytes + alloc_bytes > acontext->alloc_bytes_limit)
{
return EOVERFLOW;
}
return acontext->allocator_array.malloc_bytes + alloc_bytes <=
acontext->allocator_array.malloc_bytes_limit ? 0 : EOVERFLOW;
}
static void fast_allocator_malloc_trunk_notify_func(const int alloc_bytes, void *args)
{
if (alloc_bytes > 0)
{
__sync_add_and_fetch(&((struct fast_allocator_context *)args)->
allocator_array.malloc_bytes, alloc_bytes);
}
else
{
__sync_sub_and_fetch(&((struct fast_allocator_context *)args)->
allocator_array.malloc_bytes, -1 * alloc_bytes);
}
}
static int allocator_array_check_capacity(struct fast_allocator_context *acontext,
const int allocator_count)
{
int result;
int bytes;
struct fast_allocator_info **new_allocators;
if (acontext->allocator_array.alloc >= acontext->allocator_array.count +
allocator_count)
{
return 0;
}
if (acontext->allocator_array.alloc == 0)
{
acontext->allocator_array.alloc = 2 * allocator_count;
}
else
{
do
{
acontext->allocator_array.alloc *= 2;
} while (acontext->allocator_array.alloc < allocator_count);
}
bytes = sizeof(struct fast_allocator_info*) * acontext->allocator_array.alloc;
new_allocators = (struct fast_allocator_info **)malloc(bytes);
if (new_allocators == NULL)
{
result = errno != 0 ? errno : ENOMEM;
logError("file: "__FILE__", line: %d, "
"malloc %d bytes fail, errno: %d, error info: %s",
__LINE__, bytes, result, STRERROR(result));
return result;
}
if (acontext->allocator_array.allocators != NULL)
{
memcpy(new_allocators, acontext->allocator_array.allocators,
sizeof(struct fast_allocator_info *) *
acontext->allocator_array.count);
free(acontext->allocator_array.allocators);
}
acontext->allocator_array.allocators = new_allocators;
return 0;
}
static int region_init(struct fast_allocator_context *acontext,
struct fast_region_info *region)
{
int result;
int bytes;
int element_size;
int allocator_count;
struct fast_allocator_info *allocator;
region->pad_mask = region->step - 1;
allocator_count = (region->end - region->start) / region->step;
bytes = sizeof(struct fast_allocator_info) * allocator_count;
region->allocators = (struct fast_allocator_info *)malloc(bytes);
if (region->allocators == NULL)
{
result = errno != 0 ? errno : ENOMEM;
logError("file: "__FILE__", line: %d, "
"malloc %d bytes fail, errno: %d, error info: %s",
__LINE__, bytes, result, STRERROR(result));
return result;
}
memset(region->allocators, 0, bytes);
if ((result=allocator_array_check_capacity(acontext, allocator_count)) != 0)
{
return result;
}
result = 0;
allocator = region->allocators;
for (element_size=region->start+region->step; element_size<=region->end;
element_size+=region->step,allocator++)
{
result = fast_mblock_init_ex2(&allocator->mblock, NULL, element_size,
region->alloc_elements_once, NULL, NULL, acontext->need_lock,
fast_allocator_malloc_trunk_check,
fast_allocator_malloc_trunk_notify_func, acontext);
if (result != 0)
{
break;
}
ADD_ALLOCATOR_TO_ARRAY(acontext, allocator, true);
}
return result;
}
static void region_destroy(struct fast_allocator_context *acontext,
struct fast_region_info *region)
{
int element_size;
struct fast_allocator_info *allocator;
allocator = region->allocators;
for (element_size=region->start+region->step; element_size<=region->end;
element_size+=region->step,allocator++)
{
fast_mblock_destroy(&allocator->mblock);
}
free(region->allocators);
region->allocators = NULL;
}
int fast_allocator_init_ex(struct fast_allocator_context *acontext,
struct fast_region_info *regions, const int region_count,
const int64_t alloc_bytes_limit, const double expect_usage_ratio,
const int reclaim_interval, const bool need_lock)
{
int result;
int bytes;
int previous_end;
struct fast_region_info *pRegion;
struct fast_region_info *region_end;
srand(time(NULL));
memset(acontext, 0, sizeof(*acontext));
if (region_count <= 0)
{
return EINVAL;
}
bytes = sizeof(struct fast_region_info) * region_count;
acontext->regions = (struct fast_region_info *)malloc(bytes);
if (acontext->regions == NULL)
{
result = errno != 0 ? errno : ENOMEM;
logError("file: "__FILE__", line: %d, "
"malloc %d bytes fail, errno: %d, error info: %s",
__LINE__, bytes, result, STRERROR(result));
return result;
}
memcpy(acontext->regions, regions, bytes);
acontext->region_count = region_count;
acontext->alloc_bytes_limit = alloc_bytes_limit;
if (expect_usage_ratio < 0.01 || expect_usage_ratio > 1.00)
{
acontext->allocator_array.expect_usage_ratio = 0.80;
}
else
{
acontext->allocator_array.expect_usage_ratio = expect_usage_ratio;
}
acontext->allocator_array.malloc_bytes_limit = alloc_bytes_limit /
acontext->allocator_array.expect_usage_ratio;
acontext->allocator_array.reclaim_interval = reclaim_interval;
acontext->need_lock = need_lock;
result = 0;
previous_end = 0;
region_end = acontext->regions + acontext->region_count;
for (pRegion=acontext->regions; pRegion<region_end; pRegion++)
{
if (pRegion->start != previous_end)
{
logError("file: "__FILE__", line: %d, "
"invalid start: %d != last end: %d",
__LINE__, pRegion->start, previous_end);
result = EINVAL;
break;
}
if (pRegion->start >= pRegion->end)
{
logError("file: "__FILE__", line: %d, "
"invalid start: %d >= end: %d",
__LINE__, pRegion->start, pRegion->end);
result = EINVAL;
break;
}
if (pRegion->step <= 0 || !is_power2(pRegion->step))
{
logError("file: "__FILE__", line: %d, "
"invalid step: %d",
__LINE__, pRegion->step);
result = EINVAL;
break;
}
if (pRegion->start % pRegion->step != 0)
{
logError("file: "__FILE__", line: %d, "
"invalid start: %d, must multiple of step: %d",
__LINE__, pRegion->start, pRegion->step);
result = EINVAL;
break;
}
if (pRegion->end % pRegion->step != 0)
{
logError("file: "__FILE__", line: %d, "
"invalid end: %d, must multiple of step: %d",
__LINE__, pRegion->end, pRegion->step);
result = EINVAL;
break;
}
previous_end = pRegion->end;
if ((result=region_init(acontext, pRegion)) != 0)
{
break;
}
}
if (result != 0)
{
return result;
}
if ((result=allocator_array_check_capacity(acontext, 1)) != 0)
{
return result;
}
ADD_ALLOCATOR_TO_ARRAY(acontext, &malloc_allocator, false);
/*
logInfo("sizeof(struct allocator_wrapper): %d, allocator_array count: %d",
(int)sizeof(struct allocator_wrapper), acontext->allocator_array.count);
*/
return result;
}
int fast_allocator_init(struct fast_allocator_context *acontext,
const int64_t alloc_bytes_limit, const double expect_usage_ratio,
const int reclaim_interval, const bool need_lock)
{
#define DEFAULT_REGION_COUNT 5
struct fast_region_info regions[DEFAULT_REGION_COUNT];
FAST_ALLOCATOR_INIT_REGION(regions[0], 0, 256, 8, 4096);
FAST_ALLOCATOR_INIT_REGION(regions[1], 256, 1024, 16, 1024);
FAST_ALLOCATOR_INIT_REGION(regions[2], 1024, 4096, 64, 256);
FAST_ALLOCATOR_INIT_REGION(regions[3], 4096, 16384, 256, 64);
FAST_ALLOCATOR_INIT_REGION(regions[4], 16384, 65536, 1024, 16);
return fast_allocator_init_ex(acontext, regions,
DEFAULT_REGION_COUNT, alloc_bytes_limit,
expect_usage_ratio, reclaim_interval, need_lock);
}
void fast_allocator_destroy(struct fast_allocator_context *acontext)
{
struct fast_region_info *pRegion;
struct fast_region_info *region_end;
if (acontext->regions != NULL)
{
region_end = acontext->regions + acontext->region_count;
for (pRegion=acontext->regions; pRegion<region_end; pRegion++)
{
region_destroy(acontext, pRegion);
}
free(acontext->regions);
}
if (acontext->allocator_array.allocators != NULL)
{
free(acontext->allocator_array.allocators);
}
memset(acontext, 0, sizeof(*acontext));
}
static struct fast_allocator_info *get_allocator(struct fast_allocator_context *acontext,
int *alloc_bytes)
{
struct fast_region_info *pRegion;
struct fast_region_info *region_end;
region_end = acontext->regions + acontext->region_count;
for (pRegion=acontext->regions; pRegion<region_end; pRegion++)
{
if (*alloc_bytes <= pRegion->end)
{
*alloc_bytes = BYTES_ALIGN(*alloc_bytes, pRegion->pad_mask);
return pRegion->allocators + ((*alloc_bytes -
pRegion->start) / pRegion->step) - 1;
}
}
return &malloc_allocator;
}
int fast_allocator_retry_reclaim(struct fast_allocator_context *acontext,
int64_t *total_reclaim_bytes)
{
int64_t malloc_bytes;
int reclaim_count;
int i;
*total_reclaim_bytes = 0;
if (acontext->allocator_array.last_reclaim_time +
acontext->allocator_array.reclaim_interval > get_current_time())
{
return EAGAIN;
}
acontext->allocator_array.last_reclaim_time = get_current_time();
malloc_bytes = acontext->allocator_array.malloc_bytes;
logInfo("malloc_bytes: %"PRId64", ratio: %f", malloc_bytes, (double)acontext->alloc_bytes /
(double)malloc_bytes);
if (malloc_bytes == 0 || (double)acontext->alloc_bytes /
(double)malloc_bytes >= acontext->allocator_array.expect_usage_ratio)
{
return EAGAIN;
}
for (i=0; i< acontext->allocator_array.count; i++)
{
if (fast_mblock_reclaim(&acontext->allocator_array.
allocators[i]->mblock, 0, &reclaim_count, NULL) == 0)
{
logInfo("reclaim_count: %d", reclaim_count);
*total_reclaim_bytes += reclaim_count *
acontext->allocator_array.allocators[i]->
mblock.info.trunk_size;
}
}
return *total_reclaim_bytes > 0 ? 0 : EAGAIN;
}
void *fast_allocator_alloc(struct fast_allocator_context *acontext,
const int bytes)
{
int alloc_bytes;
int64_t total_reclaim_bytes;
struct fast_allocator_info *allocator_info;
void *ptr;
if (bytes < 0)
{
return NULL;
}
alloc_bytes = sizeof(struct allocator_wrapper) + bytes;
allocator_info = get_allocator(acontext, &alloc_bytes);
if (allocator_info->pooled)
{
ptr = fast_mblock_alloc_object(&allocator_info->mblock);
if (ptr == NULL)
{
if (acontext->allocator_array.reclaim_interval <= 0)
{
return NULL;
}
if (fast_allocator_retry_reclaim(acontext, &total_reclaim_bytes) != 0)
{
return NULL;
}
logInfo("reclaimed bytes: %"PRId64, total_reclaim_bytes);
if (total_reclaim_bytes < allocator_info->mblock.info.trunk_size)
{
return NULL;
}
ptr = fast_mblock_alloc_object(&allocator_info->mblock);
if (ptr == NULL)
{
return NULL;
}
}
}
else
{
if (fast_allocator_malloc_trunk_check(alloc_bytes, acontext) != 0)
{
return NULL;
}
ptr = malloc(alloc_bytes);
if (ptr == NULL)
{
return NULL;
}
fast_allocator_malloc_trunk_notify_func(alloc_bytes, acontext);
}
((struct allocator_wrapper *)ptr)->allocator_index = allocator_info->index;
((struct allocator_wrapper *)ptr)->magic_number = allocator_info->magic_number;
((struct allocator_wrapper *)ptr)->alloc_bytes = alloc_bytes;
__sync_add_and_fetch(&acontext->alloc_bytes, alloc_bytes);
return (char *)ptr + sizeof(struct allocator_wrapper);
}
void fast_allocator_free(struct fast_allocator_context *acontext, void *ptr)
{
struct allocator_wrapper *pWrapper;
struct fast_allocator_info *allocator_info;
void *obj;
if (ptr == NULL)
{
return;
}
obj = (char *)ptr - sizeof(struct allocator_wrapper);
pWrapper = (struct allocator_wrapper *)obj;
if (pWrapper->allocator_index < 0 || pWrapper->allocator_index >=
acontext->allocator_array.count)
{
logError("file: "__FILE__", line: %d, "
"invalid allocator index: %d",
__LINE__, pWrapper->allocator_index);
return;
}
allocator_info = acontext->allocator_array.allocators[pWrapper->allocator_index];
if (pWrapper->magic_number != allocator_info->magic_number)
{
logError("file: "__FILE__", line: %d, "
"invalid magic number: %d != %d",
__LINE__, pWrapper->magic_number,
allocator_info->magic_number);
return;
}
__sync_sub_and_fetch(&acontext->alloc_bytes, pWrapper->alloc_bytes);
pWrapper->allocator_index = -1;
pWrapper->magic_number = 0;
if (allocator_info->pooled)
{
fast_mblock_free_object(&allocator_info->mblock, obj);
}
else
{
fast_allocator_malloc_trunk_notify_func(-1 * pWrapper->alloc_bytes, acontext);
free(obj);
}
}
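A worked example of the size rounding done by get_allocator() above may help. It assumes the default regions from fast_allocator_init() and an 8-byte struct allocator_wrapper (a 4-byte int plus two 2-byte shorts on common ABIs); under those assumptions a 100-byte request is served by the 112-byte element allocator of the first region:

#include <stdio.h>

#define BYTES_ALIGN(x, pad_mask)  (((x) + pad_mask) & (~pad_mask))

int main()
{
    int request = 100;                   /* bytes asked for by the caller    */
    int wrapper = 8;                     /* sizeof(struct allocator_wrapper) */
    int alloc_bytes = wrapper + request; /* 108 bytes including the header   */
    int pad_mask = 8 - 1;                /* first default region: step 8     */

    /* 108 rounded up to the next multiple of 8 is 112, so allocator index
       (112 - 0) / 8 - 1 = 13 of region (0, 256] serves this request */
    printf("%d -> %d\n", alloc_bytes, BYTES_ALIGN(alloc_bytes, pad_mask));
    return 0;
}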

View File

@ -0,0 +1,147 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//fast_allocator.h
#ifndef _FAST_ALLOCATOR_H
#define _FAST_ALLOCATOR_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "common_define.h"
#include "fast_mblock.h"
struct fast_allocator_info
{
int index;
short magic_number;
bool pooled;
struct fast_mblock_man mblock;
};
struct fast_region_info
{
int start; //exclusive: the region covers element sizes in (start, end]
int end; //inclusive
int step;
int alloc_elements_once;
int pad_mask; //for internal use
struct fast_allocator_info *allocators;
};
struct fast_allocator_array
{
int count;
int alloc;
int reclaim_interval; //<= 0 for never reclaim
int last_reclaim_time;
volatile int64_t malloc_bytes; //total alloc bytes
int64_t malloc_bytes_limit; //water mark bytes for malloc
double expect_usage_ratio;
struct fast_allocator_info **allocators;
};
struct fast_allocator_context
{
struct fast_region_info *regions;
int region_count;
struct fast_allocator_array allocator_array;
int64_t alloc_bytes_limit; //water mark bytes for alloc
volatile int64_t alloc_bytes; //total alloc bytes
bool need_lock; //whether a mutex lock is needed for this context
};
#define FAST_ALLOCATOR_INIT_REGION(region, _start, _end, _step, _alloc_once) \
do { \
region.start = _start; \
region.end = _end; \
region.step = _step; \
region.alloc_elements_once = _alloc_once; \
} while(0)
#ifdef __cplusplus
extern "C" {
#endif
/**
allocator init with the default region allocators
parameters:
acontext: the context pointer
alloc_bytes_limit: the alloc limit, 0 for no limit
expect_usage_ratio: the trunk usage ratio
reclaim_interval: reclaim interval in seconds, <= 0 means never reclaim
need_lock: whether locking is needed
return error no, 0 for success, != 0 for failure
*/
int fast_allocator_init(struct fast_allocator_context *acontext,
const int64_t alloc_bytes_limit, const double expect_usage_ratio,
const int reclaim_interval, const bool need_lock);
/**
allocator init
parameters:
acontext: the context pointer
regions: the region array
region_count: the region count
alloc_bytes_limit: the alloc limit, 0 for no limit
expect_usage_ratio: the trunk usage ratio
reclaim_interval: reclaim interval in seconds, <= 0 means never reclaim
need_lock: whether locking is needed
return error no, 0 for success, != 0 for failure
*/
int fast_allocator_init_ex(struct fast_allocator_context *acontext,
struct fast_region_info *regions, const int region_count,
const int64_t alloc_bytes_limit, const double expect_usage_ratio,
const int reclaim_interval, const bool need_lock);
/**
allocator destroy
parameters:
acontext: the context pointer
*/
void fast_allocator_destroy(struct fast_allocator_context *acontext);
/**
alloc memory from the context
parameters:
acontext: the context pointer
bytes: alloc bytes
return the allocated pointer, or NULL on failure
*/
void* fast_allocator_alloc(struct fast_allocator_context *acontext,
const int bytes);
/**
free a node (put a node to the context)
parameters:
acontext: the context pointer
ptr: the pointer to free
return none
*/
void fast_allocator_free(struct fast_allocator_context *acontext, void *ptr);
/**
retry reclaim free trunks
parameters:
acontext: the context pointer
total_reclaim_bytes: return total reclaim bytes
return error no, 0 for success, != 0 for failure
*/
int fast_allocator_retry_reclaim(struct fast_allocator_context *acontext,
int64_t *total_reclaim_bytes);
#ifdef __cplusplus
}
#endif
#endif
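As a usage sketch under assumed limits (a 64 MB allocation cap, a 0.75 expected usage ratio, reclaim at most every 300 seconds, locking enabled), the typical call sequence for the functions declared above looks like this:

#include <string.h>
#include "fast_allocator.h"

static int allocator_demo(void)
{
    struct fast_allocator_context acontext;
    char *buff;
    int result;

    if ((result=fast_allocator_init(&acontext, 64 * 1024 * 1024,
                    0.75, 300, true)) != 0)
    {
        return result;
    }

    buff = (char *)fast_allocator_alloc(&acontext, 1024);
    if (buff != NULL)
    {
        memset(buff, 0, 1024);                /* use the memory ... */
        fast_allocator_free(&acontext, buff);
    }

    fast_allocator_destroy(&acontext);
    return 0;
}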

View File

@ -0,0 +1,129 @@
//fast_blocked_queue.c
#include <errno.h>
#include <pthread.h>
#include <inttypes.h>
#include "logger.h"
#include "shared_func.h"
#include "pthread_func.h"
#include "fast_blocked_queue.h"
int blocked_queue_init(struct fast_blocked_queue *pQueue)
{
int result;
if ((result=init_pthread_lock(&(pQueue->lock))) != 0)
{
logError("file: "__FILE__", line: %d, "
"init_pthread_lock fail, errno: %d, error info: %s",
__LINE__, result, STRERROR(result));
return result;
}
result = pthread_cond_init(&(pQueue->cond), NULL);
if (result != 0)
{
logError("file: "__FILE__", line: %d, "
"pthread_cond_init fail, "
"errno: %d, error info: %s",
__LINE__, result, STRERROR(result));
return result;
}
pQueue->head = NULL;
pQueue->tail = NULL;
return 0;
}
void blocked_queue_destroy(struct fast_blocked_queue *pQueue)
{
pthread_cond_destroy(&(pQueue->cond));
pthread_mutex_destroy(&(pQueue->lock));
}
int blocked_queue_push(struct fast_blocked_queue *pQueue,
struct fast_task_info *pTask)
{
int result;
bool notify;
if ((result=pthread_mutex_lock(&(pQueue->lock))) != 0)
{
logError("file: "__FILE__", line: %d, " \
"call pthread_mutex_lock fail, " \
"errno: %d, error info: %s", \
__LINE__, result, STRERROR(result));
return result;
}
pTask->next = NULL;
if (pQueue->tail == NULL)
{
pQueue->head = pTask;
notify = true;
}
else
{
pQueue->tail->next = pTask;
notify = false;
}
pQueue->tail = pTask;
if ((result=pthread_mutex_unlock(&(pQueue->lock))) != 0)
{
logError("file: "__FILE__", line: %d, " \
"call pthread_mutex_unlock fail, " \
"errno: %d, error info: %s", \
__LINE__, result, STRERROR(result));
}
if (notify)
{
pthread_cond_signal(&(pQueue->cond));
}
return 0;
}
struct fast_task_info *blocked_queue_pop(struct fast_blocked_queue *pQueue)
{
struct fast_task_info *pTask;
int result;
if ((result=pthread_mutex_lock(&(pQueue->lock))) != 0)
{
logError("file: "__FILE__", line: %d, " \
"call pthread_mutex_lock fail, " \
"errno: %d, error info: %s", \
__LINE__, result, STRERROR(result));
return NULL;
}
pTask = pQueue->head;
if (pTask == NULL)
{
pthread_cond_wait(&(pQueue->cond), &(pQueue->lock));
pTask = pQueue->head;
}
if (pTask != NULL)
{
pQueue->head = pTask->next;
if (pQueue->head == NULL)
{
pQueue->tail = NULL;
}
}
if ((result=pthread_mutex_unlock(&(pQueue->lock))) != 0)
{
logError("file: "__FILE__", line: %d, " \
"call pthread_mutex_unlock fail, " \
"errno: %d, error info: %s", \
__LINE__, result, STRERROR(result));
}
return pTask;
}

View File

@ -0,0 +1,61 @@
/**
* Copyright (C) 2008 Happy Fish / YuQing
*
* FastDFS may be copied only under the terms of the GNU General
* Public License V3, which may be found in the FastDFS source kit.
* Please visit the FastDFS Home Page http://www.fastken.com/ for more detail.
**/
//fast_blocked_queue.h
#ifndef _FAST_BLOCKED_QUEUE_H
#define _FAST_BLOCKED_QUEUE_H
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <pthread.h>
#include "common_define.h"
#include "fast_task_queue.h"
struct fast_blocked_queue
{
struct fast_task_info *head;
struct fast_task_info *tail;
pthread_mutex_t lock;
pthread_cond_t cond;
};
#ifdef __cplusplus
extern "C" {
#endif
int blocked_queue_init(struct fast_blocked_queue *pQueue);
void blocked_queue_destroy(struct fast_blocked_queue *pQueue);
static inline void blocked_queue_terminate(struct fast_blocked_queue *pQueue)
{
pthread_cond_signal(&(pQueue->cond));
}
static inline void blocked_queue_terminate_all(struct fast_blocked_queue *pQueue,
const int count)
{
int i;
for (i=0; i<count; i++)
{
pthread_cond_signal(&(pQueue->cond));
}
}
int blocked_queue_push(struct fast_blocked_queue *pQueue,
struct fast_task_info *pTask);
struct fast_task_info *blocked_queue_pop(struct fast_blocked_queue *pQueue);
#ifdef __cplusplus
}
#endif
#endif
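A minimal consumer-loop sketch may clarify the intended blocking semantics; process_task() and g_continue_flag are hypothetical caller-side pieces, not part of this library:

#include "fast_blocked_queue.h"

extern volatile bool g_continue_flag;                   /* assumed shutdown flag   */
extern void process_task(struct fast_task_info *task);  /* assumed worker callback */

static void *consumer_thread(void *arg)
{
    struct fast_blocked_queue *pQueue = (struct fast_blocked_queue *)arg;
    struct fast_task_info *pTask;

    while (g_continue_flag)
    {
        pTask = blocked_queue_pop(pQueue);  /* blocks while the queue is empty */
        if (pTask == NULL)  /* woken by blocked_queue_terminate() on shutdown */
        {
            continue;
        }
        process_task(pTask);
    }
    return NULL;
}

On shutdown, clear the flag and call blocked_queue_terminate(), or blocked_queue_terminate_all() with the worker count, so blocked consumers wake up and exit.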

View File

@ -0,0 +1,202 @@
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <stdarg.h>
#include <unistd.h>
#include <inttypes.h>
#include <errno.h>
#include <sys/stat.h>
#include "logger.h"
#include "shared_func.h"
#include "fast_buffer.h"
int fast_buffer_init_ex(FastBuffer *buffer, const int init_capacity)
{
buffer->length = 0;
if (init_capacity > 0)
{
buffer->alloc_size = init_capacity;
}
else
{
buffer->alloc_size = 256;
}
buffer->data = (char *)malloc(buffer->alloc_size);
if (buffer->data == NULL)
{
logError("file: "__FILE__", line: %d, "
"malloc %d bytes fail", __LINE__, buffer->alloc_size);
return ENOMEM;
}
*(buffer->data) = '\0';
return 0;
}
void fast_buffer_destroy(FastBuffer *buffer)
{
if (buffer->data != NULL)
{
free(buffer->data);
buffer->data = NULL;
buffer->length = 0;
}
}
int fast_buffer_check(FastBuffer *buffer, const int inc_len)
{
int alloc_size;
char *buff;
if (buffer->alloc_size > buffer->length + inc_len)
{
return 0;
}
alloc_size = buffer->alloc_size * 2;
while (alloc_size <= buffer->length + inc_len)
{
alloc_size *= 2;
}
buff = (char *)malloc(alloc_size);
if (buff == NULL)
{
logError("file: "__FILE__", line: %d, "
"malloc %d bytes fail", __LINE__, alloc_size);
return ENOMEM;
}
if (buffer->length > 0)
{
memcpy(buff, buffer->data, buffer->length);
}
free(buffer->data);
buffer->data = buff;
buffer->alloc_size = alloc_size;
return 0;
}
int fast_buffer_append(FastBuffer *buffer, const char *format, ...)
{
va_list ap;
int result;
int len;
if ((result=fast_buffer_check(buffer, 64)) != 0)
{
return result;
}
va_start(ap, format);
len = vsnprintf(buffer->data + buffer->length,
buffer->alloc_size - buffer->length, format, ap);
va_end(ap);
if (len < buffer->alloc_size - buffer->length)
{
buffer->length += len;
}
else //maybe full, realloc and try again
{
if ((result=fast_buffer_check(buffer, len)) == 0)
{
va_start(ap, format);
buffer->length += vsnprintf(buffer->data + buffer->length,
buffer->alloc_size - buffer->length, format, ap);
va_end(ap);
}
else
{
*(buffer->data + buffer->length) = '\0'; //restore
}
}
return result;
}
int fast_buffer_append_buff(FastBuffer *buffer, const char *data, const int len)
{
int result;
if (len <= 0)
{
return 0;
}
if ((result=fast_buffer_check(buffer, len)) != 0)
{
return result;
}
memcpy(buffer->data + buffer->length, data, len);
buffer->length += len;
*(buffer->data + buffer->length) = '\0';
return 0;
}
int fast_buffer_append_int(FastBuffer *buffer, const int n)
{
int result;
if ((result=fast_buffer_check(buffer, 16)) != 0)
{
return result;
}
buffer->length += sprintf(buffer->data + buffer->length, "%d", n);
return 0;
}
int fast_buffer_append_int64(FastBuffer *buffer, const int64_t n)
{
int result;
if ((result=fast_buffer_check(buffer, 32)) != 0)
{
return result;
}
buffer->length += sprintf(buffer->data + buffer->length, "%"PRId64, n);
return 0;
}
int fast_buffer_append_file(FastBuffer *buffer, const char *filename)
{
struct stat st;
int result;
int64_t file_size;
if (stat(filename, &st) != 0) {
result = errno != 0 ? errno : ENOENT;
if (result == ENOENT) {
logError("file: "__FILE__", line: %d, "
"file %s not exist!", __LINE__,
filename);
} else {
logError("file: "__FILE__", line: %d, "
"stat file %s fail, "
"result: %d, error info: %s", __LINE__,
filename, result, strerror(result));
}
return result;
}
if (!S_ISREG(st.st_mode)) {
logError("file: "__FILE__", line: %d, "
"file %s is NOT a regular file!",
__LINE__, filename);
return EINVAL;
}
file_size = st.st_size + 1;
if ((result=fast_buffer_check(buffer, file_size)) != 0) {
return result;
}
if ((result=getFileContentEx(filename, buffer->data + buffer->length,
0, &file_size)) != 0)
{
return result;
}
buffer->length += file_size;
return 0;
}

View File

@ -0,0 +1,76 @@
#ifndef __FAST_BUFFER_H__
#define __FAST_BUFFER_H__
#include <stdint.h>
#include "common_define.h"
typedef struct fast_buffer {
char *data;
int alloc_size;
int length;
} FastBuffer;
#ifdef __cplusplus
extern "C" {
#endif
static inline int fast_buffer_length(FastBuffer *buffer)
{
return buffer->length;
}
static inline char *fast_buffer_data(FastBuffer *buffer)
{
return buffer->data;
}
int fast_buffer_init_ex(FastBuffer *buffer, const int init_capacity);
static inline int fast_buffer_init(FastBuffer *buffer)
{
return fast_buffer_init_ex(buffer, 0);
}
#define fast_buffer_clear(buffer) fast_buffer_reset(buffer)
static inline void fast_buffer_reset(FastBuffer *buffer)
{
buffer->length = 0;
*buffer->data = '\0';
}
void fast_buffer_destroy(FastBuffer *buffer);
int fast_buffer_check(FastBuffer *buffer, const int inc_len);
int fast_buffer_append(FastBuffer *buffer, const char *format, ...);
int fast_buffer_append_buff(FastBuffer *buffer, const char *data, const int len);
int fast_buffer_append_int(FastBuffer *buffer, const int n);
int fast_buffer_append_int64(FastBuffer *buffer, const int64_t n);
int fast_buffer_append_file(FastBuffer *buffer, const char *filename);
static inline int fast_buffer_append_string(FastBuffer *buffer, const char *str)
{
return fast_buffer_append_buff(buffer, str, strlen(str));
}
static inline int fast_buffer_append_string2(FastBuffer *buffer, const string_t *add)
{
return fast_buffer_append_buff(buffer, add->str, add->len);
}
static inline int fast_buffer_append_buffer(FastBuffer *buffer, FastBuffer *src)
{
return fast_buffer_append_buff(buffer, src->data, src->length);
}
#ifdef __cplusplus
}
#endif
#endif
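A short usage sketch tying the calls above together; the appended values are arbitrary placeholders:

#include <stdio.h>
#include "fast_buffer.h"

static int buffer_demo(void)
{
    FastBuffer buffer;
    int result;

    if ((result=fast_buffer_init_ex(&buffer, 1024)) != 0)
    {
        return result;
    }

    fast_buffer_append_string(&buffer, "pid=");
    fast_buffer_append_int(&buffer, 12345);
    fast_buffer_append(&buffer, ", name=%s", "demo");

    printf("%.*s\n", fast_buffer_length(&buffer), fast_buffer_data(&buffer));

    fast_buffer_reset(&buffer);    /* length back to 0, capacity kept */
    fast_buffer_destroy(&buffer);
    return 0;
}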

View File

@ -0,0 +1,26 @@
# probe the machine word size: sizeof(long) is 8 bytes on a 64-bit system
tmp_src_filename=_fast_check_bits_.c
cat <<EOF > $tmp_src_filename
#include <stdio.h>
#include <unistd.h>
#include <fcntl.h>
int main()
{
printf("%d\n", (int)sizeof(long));
return 0;
}
EOF
gcc -D_FILE_OFFSET_BITS=64 -o a.out $tmp_src_filename
OS_BITS=`./a.out`   # holds sizeof(long) in bytes: "8" means a 64-bit system
rm $tmp_src_filename a.out
# when running as root, symlink the installed shared library into the
# default linker path (and into /usr/lib64 on 64-bit systems)
TARGET_LIB="/usr/local/lib"
if [ "`id -u`" = "0" ]; then
ln -fs $TARGET_LIB/libfastcommon.so.1 /usr/lib/libfastcommon.so
if [ "$OS_BITS" = "8" ]; then
ln -fs $TARGET_LIB/libfastcommon.so.1 /usr/lib64/libfastcommon.so
fi
fi

Some files were not shown because too many files have changed in this diff.